/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define SVM_AVIC_DOORBELL	0xc001011b

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
					 (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
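/*
 * Worked example (illustrative): AVIC_GATAG(1, 3) packs VM ID 1 into the
 * upper 24 tag bits and vCPU ID 3 into the lower 8, giving 0x103;
 * AVIC_GATAG_TO_VMID() and AVIC_GATAG_TO_VCPUID() recover 1 and 3 from it.
 */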

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
216 struct list_head ir_list;
217 spinlock_t ir_list_lock;
Brijesh Singh70cd94e2017-12-04 10:57:34 -0600218
219 /* which host CPU was used for running this vcpu */
220 unsigned int last_cpu;
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -0500221};
222
223/*
224 * This is a wrapper of struct amd_iommu_ir_data.
225 */
226struct amd_svm_iommu_ir {
227 struct list_head node; /* Used by SVM for per-vcpu ir_list */
228 void *data; /* Storing pointer to struct amd_ir_data */
Avi Kivity6c8166a2009-05-31 18:15:37 +0300229};
230
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -0500231#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
232#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
233
234#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
235#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
236#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
237#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
238
Joerg Roedelfbc0db72011-03-25 09:44:46 +0100239static DEFINE_PER_CPU(u64, current_tsc_ratio);
240#define TSC_RATIO_DEFAULT 0x0100000000ULL
241
Joerg Roedel455716f2010-03-01 15:34:35 +0100242#define MSR_INVALID 0xffffffffU
243
Mathias Krause09941fb2012-08-30 01:30:20 +0200244static const struct svm_direct_access_msrs {
Joerg Roedelac72a9b2010-03-01 15:34:36 +0100245 u32 index; /* Index of the MSR */
246 bool always; /* True if intercept is always on */
247} direct_access_msrs[] = {
Brian Gerst8c065852010-07-17 09:03:26 -0400248 { .index = MSR_STAR, .always = true },
Joerg Roedelac72a9b2010-03-01 15:34:36 +0100249 { .index = MSR_IA32_SYSENTER_CS, .always = true },
250#ifdef CONFIG_X86_64
251 { .index = MSR_GS_BASE, .always = true },
252 { .index = MSR_FS_BASE, .always = true },
253 { .index = MSR_KERNEL_GS_BASE, .always = true },
254 { .index = MSR_LSTAR, .always = true },
255 { .index = MSR_CSTAR, .always = true },
256 { .index = MSR_SYSCALL_MASK, .always = true },
257#endif
258 { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
259 { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
260 { .index = MSR_IA32_LASTINTFROMIP, .always = false },
261 { .index = MSR_IA32_LASTINTTOIP, .always = false },
262 { .index = MSR_INVALID, .always = false },
Avi Kivity6aa8b732006-12-10 02:21:36 -0800263};
264
265/* enable NPT for AMD64 and X86 with PAE */
266#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
267static bool npt_enabled = true;
268#else
Joerg Roedele0231712010-02-24 18:59:10 +0100269static bool npt_enabled;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800270#endif
271
Davidlohr Buesoe2358852012-01-17 14:09:50 +0100272/* allow nested paging (virtualized MMU) for all guests */
273static int npt = true;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800274module_param(npt, int, S_IRUGO);
275
Davidlohr Buesoe2358852012-01-17 14:09:50 +0100276/* allow nested virtualization in KVM/SVM */
277static int nested = true;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800278module_param(nested, int, S_IRUGO);
279
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -0500280/* enable / disable AVIC */
281static int avic;
Suravee Suthikulpanit5b8abf12016-06-15 17:24:36 -0500282#ifdef CONFIG_X86_LOCAL_APIC
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -0500283module_param(avic, int, S_IRUGO);
Suravee Suthikulpanit5b8abf12016-06-15 17:24:36 -0500284#endif
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -0500285
Janakarajan Natarajan89c8a492017-07-06 15:50:47 -0500286/* enable/disable Virtual VMLOAD VMSAVE */
287static int vls = true;
288module_param(vls, int, 0444);
289
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -0500290/* enable/disable Virtual GIF */
291static int vgif = true;
292module_param(vgif, int, 0444);
Suravee Suthikulpanit5ea11f22016-08-23 13:52:41 -0500293
Brijesh Singhe9df0942017-12-04 10:57:33 -0600294/* enable/disable SEV support */
295static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
296module_param(sev, int, 0444);
297
Paolo Bonzini79a80592015-09-21 07:46:55 +0200298static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800299static void svm_flush_tlb(struct kvm_vcpu *vcpu);
Joerg Roedela5c38322009-08-07 11:49:32 +0200300static void svm_complete_interrupts(struct vcpu_svm *svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800301
Joerg Roedel410e4d52009-08-07 11:49:44 +0200302static int nested_svm_exit_handled(struct vcpu_svm *svm);
Joerg Roedelb8e88bc2010-02-19 16:23:02 +0100303static int nested_svm_intercept(struct vcpu_svm *svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800304static int nested_svm_vmexit(struct vcpu_svm *svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800305static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
306 bool has_error_code, u32 error_code);
307
Roedel, Joerg8d28fec2010-12-03 13:15:21 +0100308enum {
Joerg Roedel116a0a22010-12-03 11:45:49 +0100309 VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
310 pause filter count */
Joerg Roedelf56838e2010-12-03 11:45:50 +0100311 VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */
Joerg Roedeld48086d2010-12-03 11:45:51 +0100312 VMCB_ASID, /* ASID */
Joerg Roedeldecdbf62010-12-03 11:45:52 +0100313 VMCB_INTR, /* int_ctl, int_vector */
Joerg Roedelb2747162010-12-03 11:45:53 +0100314 VMCB_NPT, /* npt_en, nCR3, gPAT */
Joerg Roedeldcca1a62010-12-03 11:45:54 +0100315 VMCB_CR, /* CR0, CR3, CR4, EFER */
Joerg Roedel72214b92010-12-03 11:45:55 +0100316 VMCB_DR, /* DR6, DR7 */
Joerg Roedel17a703c2010-12-03 11:45:56 +0100317 VMCB_DT, /* GDT, IDT */
Joerg Roedel060d0c92010-12-03 11:45:57 +0100318 VMCB_SEG, /* CS, DS, SS, ES, CPL */
Joerg Roedel0574dec2010-12-03 11:45:58 +0100319 VMCB_CR2, /* CR2 only */
Joerg Roedelb53ba3f2010-12-03 11:45:59 +0100320 VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -0500321 VMCB_AVIC, /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
322 * AVIC PHYSICAL_TABLE pointer,
323 * AVIC LOGICAL_TABLE pointer
324 */
Roedel, Joerg8d28fec2010-12-03 13:15:21 +0100325 VMCB_DIRTY_MAX,
326};
327
Joerg Roedel0574dec2010-12-03 11:45:58 +0100328/* TPR and CR2 are always written before VMRUN */
329#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
Roedel, Joerg8d28fec2010-12-03 13:15:21 +0100330
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -0500331#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
332
Brijesh Singhed3cd232017-12-04 10:57:32 -0600333static unsigned int max_sev_asid;
Brijesh Singh1654efc2017-12-04 10:57:34 -0600334static unsigned int min_sev_asid;
335static unsigned long *sev_asid_bitmap;
Brijesh Singh89c50582017-12-04 10:57:35 -0600336#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
Brijesh Singh1654efc2017-12-04 10:57:34 -0600337
Brijesh Singh1e80fdc2017-12-04 10:57:38 -0600338struct enc_region {
339 struct list_head list;
340 unsigned long npages;
341 struct page **pages;
342 unsigned long uaddr;
343 unsigned long size;
344};
345
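/*
 * max_sev_asid is filled in by sev_hardware_setup() from CPUID 0x8000001F;
 * it stays zero when SEV is unavailable or disabled, so svm_sev_enabled()
 * simply tests it.
 */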
static inline bool svm_sev_enabled(void)
{
	return max_sev_asid;
}

static inline bool sev_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->active;
}

static inline int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->asid;
}

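/*
 * VMCB clean-bit helpers: hardware may skip reloading VMCB fields whose
 * clean bit is set, so every write to a cached field must be paired with
 * mark_dirty() for the corresponding bit, while the fields in
 * VMCB_ALWAYS_DIRTY_MASK are kept permanently dirty.
 */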
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

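/*
 * While a nested guest runs, the active VMCB must trap everything that
 * either the host state (hsave) or the L1 hypervisor (svm->nested) asked
 * to intercept, so the effective intercept masks are the OR of both sets.
 */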
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

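/*
 * MSR permission bitmap layout: each 2K range covers 8192 MSRs at two bits
 * apiece (one read-intercept bit, one write-intercept bit), so one byte
 * holds 4 MSRs, and the ranges start at MSRs 0x0, 0xc0000000 and 0xc0010000
 * as listed in msrpm_ranges[] above.
 */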
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

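/*
 * CLGI and STGI clear and set the global interrupt flag (GIF); while GIF
 * is clear the processor holds interrupts (including NMIs) pending, which
 * lets the VMRUN world-switch path run without being interrupted on the
 * host.
 */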
static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	bool reinject = vcpu->arch.exception.injected;
	u32 error_code = vcpu->arch.exception.error_code;

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	r = -ENOMEM;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto err_1;

	if (svm_sev_enabled()) {
		r = -ENOMEM;
		sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *),
					GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto err_1;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

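/*
 * Each MSR owns two adjacent bits in the permission bitmap: bit
 * 2 * (msr & 0x0f) intercepts reads and the next bit intercepts writes,
 * so passing read=1/write=1 clears both bits and gives the guest direct
 * access to the MSR.
 */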
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at
	 * the beginning of the file.
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

/* Note:
 * This hash table is used to map a VM_ID to its struct kvm_arch when
 * handling an AMD IOMMU GALOG notification, so that the corresponding
 * vCPU can be scheduled in.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_arch *ka = NULL;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
		struct kvm *kvm = container_of(ka, struct kvm, arch);
		struct kvm_arch *vm_data = &kvm->arch;

		if (vm_data->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/* Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

static __init int sev_hardware_setup(void)
{
	struct sev_user_data_status *status;
	int rc;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = cpuid_ecx(0x8000001F);

	if (!max_sev_asid)
		return 1;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmap */
	sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
				  sizeof(unsigned long), GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

	status = kmalloc(sizeof(*status), GFP_KERNEL);
	if (!status)
		return 1;

	/*
	 * Check SEV platform status.
	 *
	 * PLATFORM_STATUS can be called in any state; if the query fails,
	 * either the PSP firmware does not support the SEV feature or the
	 * SEV firmware is dead.
	 */
	rc = sev_platform_status(status, NULL);
	if (rc)
		goto err;

	pr_info("SEV supported\n");

err:
	kfree(status);
	return rc;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (sev) {
		if (boot_cpu_has(X86_FEATURE_SEV) &&
		    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
			r = sev_hardware_setup();
			if (r)
				sev = false;
		} else {
			sev = false;
		}
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	if (svm_sev_enabled())
		kfree(sev_asid_bitmap);

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

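/*
 * The guest-visible TSC is host TSC + control.tsc_offset.  When L2 is
 * running, control.tsc_offset already includes L1's extra offset, so the
 * delta relative to L1's hsave offset is carried over before the new base
 * offset is applied.
 */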
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

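/*
 * Point the VMCB at this VM's AVIC logical/physical ID tables and this
 * vCPU's backing page; AVIC_HPA_MASK keeps only the host-physical-address
 * bits of each pointer.
 */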
static void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
	phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
	phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_XSETBV);

	if (!kvm_mwait_in_guest()) {
		set_intercept(svm, INTERCEPT_MONITOR);
		set_intercept(svm, INTERCEPT_MWAIT);
	}

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;
1421
Avi Kivity2a6b20b2010-11-09 16:15:42 +02001422 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
Mark Langsdorf565d0992009-10-06 14:25:02 -05001423 control->pause_filter_count = 3000;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001424 set_intercept(svm, INTERCEPT_PAUSE);
Mark Langsdorf565d0992009-10-06 14:25:02 -05001425 }
1426
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05001427 if (kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001428 avic_init_vmcb(svm);
1429
Janakarajan Natarajan89c8a492017-07-06 15:50:47 -05001430 /*
1431 * If hardware supports Virtual VMLOAD VMSAVE then enable it
1432 * in VMCB and clear intercepts to avoid #VMEXIT.
1433 */
1434 if (vls) {
1435 clr_intercept(svm, INTERCEPT_VMLOAD);
1436 clr_intercept(svm, INTERCEPT_VMSAVE);
1437 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1438 }
1439
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05001440 if (vgif) {
1441 clr_intercept(svm, INTERCEPT_STGI);
1442 clr_intercept(svm, INTERCEPT_CLGI);
1443 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1444 }
1445
Brijesh Singh35c6f6492017-12-04 10:57:39 -06001446 if (sev_guest(svm->vcpu.kvm)) {
Brijesh Singh1654efc2017-12-04 10:57:34 -06001447 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
Brijesh Singh35c6f6492017-12-04 10:57:39 -06001448 clr_exception_intercept(svm, UD_VECTOR);
1449 }
Brijesh Singh1654efc2017-12-04 10:57:34 -06001450
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001451 mark_all_dirty(svm->vmcb);
1452
Joerg Roedel2af91942009-08-07 11:49:28 +02001453 enable_gif(svm);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001454
1455}
1456
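/*
 * The physical APIC ID table is a single 4KB page of 64-bit entries,
 * one per physical APIC ID; indices beyond AVIC_MAX_PHYSICAL_ID_COUNT
 * are rejected rather than walking off the page.
 */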
Dan Carpenterd3e7dec2017-05-18 10:38:53 +03001457static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1458 unsigned int index)
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001459{
1460 u64 *avic_physical_id_table;
1461 struct kvm_arch *vm_data = &vcpu->kvm->arch;
1462
1463 if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
1464 return NULL;
1465
1466 avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);
1467
1468 return &avic_physical_id_table[index];
1469}
1470
1471/**
1472 * Note:
1473 * AVIC hardware walks the nested page table to check permissions,
1474 * but does not use the SPA address specified in the leaf page
1475	 * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
1476 * field of the VMCB. Therefore, we set up the
1477 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
1478 */
1479static int avic_init_access_page(struct kvm_vcpu *vcpu)
1480{
1481 struct kvm *kvm = vcpu->kvm;
1482 int ret;
1483
1484 if (kvm->arch.apic_access_page_done)
1485 return 0;
1486
1487 ret = x86_set_memory_region(kvm,
1488 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1489 APIC_DEFAULT_PHYS_BASE,
1490 PAGE_SIZE);
1491 if (ret)
1492 return ret;
1493
1494 kvm->arch.apic_access_page_done = true;
1495 return 0;
1496}
1497
1498static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1499{
1500 int ret;
1501 u64 *entry, new_entry;
1502 int id = vcpu->vcpu_id;
1503 struct vcpu_svm *svm = to_svm(vcpu);
1504
1505 ret = avic_init_access_page(vcpu);
1506 if (ret)
1507 return ret;
1508
1509 if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1510 return -EINVAL;
1511
1512 if (!svm->vcpu.arch.apic->regs)
1513 return -EINVAL;
1514
1515 svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1516
1517	/* Set the AVIC backing page address in the physical APIC ID table */
1518 entry = avic_get_physical_id_entry(vcpu, id);
1519 if (!entry)
1520 return -EINVAL;
1521
1522 new_entry = READ_ONCE(*entry);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001523 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1524 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1525 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001526 WRITE_ONCE(*entry, new_entry);
1527
1528 svm->avic_physical_id_cache = entry;
1529
1530 return 0;
1531}
1532
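/*
 * SEV ASIDs are 1-based, so ASID N occupies bit N-1 in the allocation
 * bitmap. The per-CPU cached VMCB pointers for this ASID are also
 * dropped, so a future owner of the ASID cannot match a stale cached
 * VMCB and skip the required TLB flush on its first VMRUN.
 */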
Brijesh Singh1654efc2017-12-04 10:57:34 -06001533static void __sev_asid_free(int asid)
1534{
Brijesh Singh70cd94e2017-12-04 10:57:34 -06001535 struct svm_cpu_data *sd;
1536 int cpu, pos;
Brijesh Singh1654efc2017-12-04 10:57:34 -06001537
1538 pos = asid - 1;
1539 clear_bit(pos, sev_asid_bitmap);
Brijesh Singh70cd94e2017-12-04 10:57:34 -06001540
1541 for_each_possible_cpu(cpu) {
1542 sd = per_cpu(svm_data, cpu);
1543 sd->sev_vmcbs[pos] = NULL;
1544 }
Brijesh Singh1654efc2017-12-04 10:57:34 -06001545}
1546
1547static void sev_asid_free(struct kvm *kvm)
1548{
1549 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1550
1551 __sev_asid_free(sev->asid);
1552}
1553
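/*
 * Tearing down a firmware handle is a two-step sequence: DEACTIVATE
 * detaches the handle from its ASID, then, after WBINVD on all CPUs
 * and a firmware DF_FLUSH, DECOMMISSION releases the handle itself.
 */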
Brijesh Singh59414c92017-12-04 10:57:35 -06001554static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
1555{
1556 struct sev_data_decommission *decommission;
1557 struct sev_data_deactivate *data;
1558
1559 if (!handle)
1560 return;
1561
1562 data = kzalloc(sizeof(*data), GFP_KERNEL);
1563 if (!data)
1564 return;
1565
1566 /* deactivate handle */
1567 data->handle = handle;
1568 sev_guest_deactivate(data, NULL);
1569
1570 wbinvd_on_all_cpus();
1571 sev_guest_df_flush(NULL);
1572 kfree(data);
1573
1574 decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
1575 if (!decommission)
1576 return;
1577
1578 /* decommission handle */
1579 decommission->handle = handle;
1580 sev_guest_decommission(decommission, NULL);
1581
1582 kfree(decommission);
1583}
1584
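/*
 * Pin a range of guest userspace memory so that firmware can operate
 * on it. The pinned pages are charged against RLIMIT_MEMLOCK via
 * sev->pages_locked; on success *n receives the page count and the
 * returned array must be released with sev_unpin_memory().
 */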
Brijesh Singh89c50582017-12-04 10:57:35 -06001585static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
1586 unsigned long ulen, unsigned long *n,
1587 int write)
1588{
1589 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1590 unsigned long npages, npinned, size;
1591 unsigned long locked, lock_limit;
1592 struct page **pages;
1593 int first, last;
1594
1595 /* Calculate number of pages. */
1596 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
1597 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
1598 npages = (last - first + 1);
1599
1600 locked = sev->pages_locked + npages;
1601 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1602 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
1603 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
1604 return NULL;
1605 }
1606
1607 /* Avoid using vmalloc for smaller buffers. */
1608 size = npages * sizeof(struct page *);
1609 if (size > PAGE_SIZE)
1610 pages = vmalloc(size);
1611 else
1612 pages = kmalloc(size, GFP_KERNEL);
1613
1614 if (!pages)
1615 return NULL;
1616
1617 /* Pin the user virtual address. */
1618 npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
1619 if (npinned != npages) {
1620 pr_err("SEV: Failure locking %lu pages.\n", npages);
1621 goto err;
1622 }
1623
1624 *n = npages;
1625 sev->pages_locked = locked;
1626
1627 return pages;
1628
1629err:
1630 if (npinned > 0)
1631 release_pages(pages, npinned);
1632
1633 kvfree(pages);
1634 return NULL;
1635}
1636
1637static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
1638 unsigned long npages)
1639{
1640 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1641
1642 release_pages(pages, npages);
1643 kvfree(pages);
1644 sev->pages_locked -= npages;
1645}
1646
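/*
 * Flush each page out of the cache hierarchy. Cache lines for SEV
 * guest pages are tagged with the C-bit, so stale lines must be
 * written back before the pages are accessed with a different
 * encryption attribute.
 */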
1647static void sev_clflush_pages(struct page *pages[], unsigned long npages)
1648{
1649 uint8_t *page_virtual;
1650 unsigned long i;
1651
1652 if (npages == 0 || pages == NULL)
1653 return;
1654
1655 for (i = 0; i < npages; i++) {
1656 page_virtual = kmap_atomic(pages[i]);
1657 clflush_cache_range(page_virtual, PAGE_SIZE);
1658 kunmap_atomic(page_virtual);
1659 }
1660}
1661
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001662static void __unregister_enc_region_locked(struct kvm *kvm,
1663 struct enc_region *region)
1664{
1665 /*
1666 * The guest may change the memory encryption attribute from C=0 -> C=1
1667	 * or vice versa for this memory range. Flush the caches to ensure
1668	 * that guest data is written back to memory with the correct
1669	 * C-bit.
1670 */
1671 sev_clflush_pages(region->pages, region->npages);
1672
1673 sev_unpin_memory(kvm, region->pages, region->npages);
1674 list_del(&region->list);
1675 kfree(region);
1676}
1677
Brijesh Singh1654efc2017-12-04 10:57:34 -06001678static void sev_vm_destroy(struct kvm *kvm)
1679{
Brijesh Singh59414c92017-12-04 10:57:35 -06001680 struct kvm_sev_info *sev = &kvm->arch.sev_info;
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001681 struct list_head *head = &sev->regions_list;
1682 struct list_head *pos, *q;
Brijesh Singh59414c92017-12-04 10:57:35 -06001683
Brijesh Singh1654efc2017-12-04 10:57:34 -06001684 if (!sev_guest(kvm))
1685 return;
1686
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001687 mutex_lock(&kvm->lock);
1688
1689 /*
1690	 * If userspace was terminated before unregistering the memory regions,
1691	 * unpin all the registered memory here.
1692 */
1693 if (!list_empty(head)) {
1694 list_for_each_safe(pos, q, head) {
1695 __unregister_enc_region_locked(kvm,
1696 list_entry(pos, struct enc_region, list));
1697 }
1698 }
1699
1700 mutex_unlock(&kvm->lock);
1701
Brijesh Singh59414c92017-12-04 10:57:35 -06001702 sev_unbind_asid(kvm, sev->handle);
Brijesh Singh1654efc2017-12-04 10:57:34 -06001703 sev_asid_free(kvm);
1704}
1705
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001706static void avic_vm_destroy(struct kvm *kvm)
1707{
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001708 unsigned long flags;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001709 struct kvm_arch *vm_data = &kvm->arch;
1710
Dmitry Vyukov3863dff2017-01-24 14:06:48 +01001711 if (!avic)
1712 return;
1713
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001714 if (vm_data->avic_logical_id_table_page)
1715 __free_page(vm_data->avic_logical_id_table_page);
1716 if (vm_data->avic_physical_id_table_page)
1717 __free_page(vm_data->avic_physical_id_table_page);
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001718
1719 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1720 hash_del(&vm_data->hnode);
1721 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001722}
1723
Brijesh Singh1654efc2017-12-04 10:57:34 -06001724static void svm_vm_destroy(struct kvm *kvm)
1725{
1726 avic_vm_destroy(kvm);
1727 sev_vm_destroy(kvm);
1728}
1729
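/*
 * Allocate the per-VM AVIC structures and assign a non-zero AVIC VM ID.
 * The ID is stored in a hash table so the IOMMU GA-log handler can map
 * an event back to its struct kvm; once the ID space (AVIC_VM_ID_MASK)
 * has wrapped, re-check for collisions before publishing a new ID.
 */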
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001730static int avic_vm_init(struct kvm *kvm)
1731{
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001732 unsigned long flags;
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001733 int err = -ENOMEM;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001734 struct kvm_arch *vm_data = &kvm->arch;
1735 struct page *p_page;
1736 struct page *l_page;
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001737 struct kvm_arch *ka;
1738 u32 vm_id;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001739
1740 if (!avic)
1741 return 0;
1742
1743 /* Allocating physical APIC ID table (4KB) */
1744 p_page = alloc_page(GFP_KERNEL);
1745 if (!p_page)
1746 goto free_avic;
1747
1748 vm_data->avic_physical_id_table_page = p_page;
1749 clear_page(page_address(p_page));
1750
1751 /* Allocating logical APIC ID table (4KB) */
1752 l_page = alloc_page(GFP_KERNEL);
1753 if (!l_page)
1754 goto free_avic;
1755
1756 vm_data->avic_logical_id_table_page = l_page;
1757 clear_page(page_address(l_page));
1758
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001759 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001760 again:
1761 vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1762 if (vm_id == 0) { /* id is 1-based, zero is not okay */
1763 next_vm_id_wrapped = 1;
1764 goto again;
1765 }
1766 /* Is it still in use? Only possible if wrapped at least once */
1767 if (next_vm_id_wrapped) {
1768 hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
1769 struct kvm *k2 = container_of(ka, struct kvm, arch);
1770 struct kvm_arch *vd2 = &k2->arch;
1771 if (vd2->avic_vm_id == vm_id)
1772 goto again;
1773 }
1774 }
1775 vm_data->avic_vm_id = vm_id;
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001776 hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
1777 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1778
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001779 return 0;
1780
1781free_avic:
1782 avic_vm_destroy(kvm);
1783 return err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001784}
1785
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001786static inline int
1787avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001788{
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001789 int ret = 0;
1790 unsigned long flags;
1791 struct amd_svm_iommu_ir *ir;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001792 struct vcpu_svm *svm = to_svm(vcpu);
1793
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001794 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1795 return 0;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001796
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001797 /*
1798 * Here, we go through the per-vcpu ir_list to update all existing
1799	 * interrupt remapping table entries targeting this vcpu.
1800 */
1801 spin_lock_irqsave(&svm->ir_list_lock, flags);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001802
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001803 if (list_empty(&svm->ir_list))
1804 goto out;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001805
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001806 list_for_each_entry(ir, &svm->ir_list, node) {
1807 ret = amd_iommu_update_ga(cpu, r, ir->data);
1808 if (ret)
1809 break;
1810 }
1811out:
1812 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1813 return ret;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001814}
1815
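/*
 * On vcpu_load, refresh this vCPU's entry in the physical APIC ID
 * table: record the host physical APIC ID of the new CPU and, if the
 * vCPU is not halted, set IS_RUNNING so that peers and the IOMMU can
 * deliver interrupts directly via doorbell. Any IOMMU interrupt-
 * remapping entries targeting this vCPU are updated to match.
 */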
1816static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1817{
1818 u64 entry;
1819 /* ID = 0xff (broadcast), ID > 0xff (reserved) */
Suravee Suthikulpanit7d669f52016-06-15 17:23:45 -05001820 int h_physical_id = kvm_cpu_get_apicid(cpu);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001821 struct vcpu_svm *svm = to_svm(vcpu);
1822
1823 if (!kvm_vcpu_apicv_active(vcpu))
1824 return;
1825
1826 if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
1827 return;
1828
1829 entry = READ_ONCE(*(svm->avic_physical_id_cache));
1830 WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
1831
1832 entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
1833 entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
1834
1835 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1836 if (svm->avic_is_running)
1837 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1838
1839 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001840 avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
1841 svm->avic_is_running);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001842}
1843
1844static void avic_vcpu_put(struct kvm_vcpu *vcpu)
1845{
1846 u64 entry;
1847 struct vcpu_svm *svm = to_svm(vcpu);
1848
1849 if (!kvm_vcpu_apicv_active(vcpu))
1850 return;
1851
1852 entry = READ_ONCE(*(svm->avic_physical_id_cache));
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001853 if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
1854 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
1855
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001856 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1857 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001858}
1859
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001860/**
1861 * This function is called during VCPU halt/unhalt.
1862 */
1863static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
1864{
1865 struct vcpu_svm *svm = to_svm(vcpu);
1866
1867 svm->avic_is_running = is_run;
1868 if (is_run)
1869 avic_vcpu_load(vcpu, vcpu->cpu);
1870 else
1871 avic_vcpu_put(vcpu);
1872}
1873
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001874static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
Avi Kivity04d2cc72007-09-10 18:10:54 +03001875{
1876 struct vcpu_svm *svm = to_svm(vcpu);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001877 u32 dummy;
1878 u32 eax = 1;
Avi Kivity04d2cc72007-09-10 18:10:54 +03001879
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001880 if (!init_event) {
1881 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
1882 MSR_IA32_APICBASE_ENABLE;
1883 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
1884 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
1885 }
Paolo Bonzini56908912015-10-19 11:30:19 +02001886 init_vmcb(svm);
Avi Kivity70433382007-11-07 12:57:23 +02001887
Yu Zhange911eb32017-08-24 20:27:52 +08001888 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001889 kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001890
1891 if (kvm_vcpu_apicv_active(vcpu) && !init_event)
1892 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
Avi Kivity04d2cc72007-09-10 18:10:54 +03001893}
1894
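/*
 * Per-vCPU AVIC setup: install the vAPIC backing page in the physical
 * APIC ID table and initialize the ir_list, which tracks the IOMMU
 * interrupt-remapping entries that currently target this vCPU.
 */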
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001895static int avic_init_vcpu(struct vcpu_svm *svm)
1896{
1897 int ret;
1898
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05001899 if (!kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001900 return 0;
1901
1902 ret = avic_init_backing_page(&svm->vcpu);
1903 if (ret)
1904 return ret;
1905
1906 INIT_LIST_HEAD(&svm->ir_list);
1907 spin_lock_init(&svm->ir_list_lock);
1908
1909 return ret;
1910}
1911
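/*
 * A vCPU needs four allocations here: the VMCB itself, two MSR
 * permission bitmaps (one for L1, one for the nested guest), and the
 * host state-save area (hsave) used by nested SVM across VMRUN.
 */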
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001912static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001913{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001914 struct vcpu_svm *svm;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001915 struct page *page;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001916 struct page *msrpm_pages;
Alexander Grafb286d5d2008-11-25 20:17:05 +01001917 struct page *hsave_page;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001918 struct page *nested_msrpm_pages;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001919 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001920
Rusty Russellc16f8622007-07-30 21:12:19 +10001921 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001922 if (!svm) {
1923 err = -ENOMEM;
1924 goto out;
1925 }
1926
1927 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1928 if (err)
1929 goto free_svm;
1930
Joerg Roedelf65c2292008-02-13 18:58:46 +01001931 err = -ENOMEM;
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001932 page = alloc_page(GFP_KERNEL);
1933 if (!page)
1934 goto uninit;
1935
Joerg Roedelf65c2292008-02-13 18:58:46 +01001936 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1937 if (!msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001938 goto free_page1;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001939
1940 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1941 if (!nested_msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001942 goto free_page2;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001943
Alexander Grafb286d5d2008-11-25 20:17:05 +01001944 hsave_page = alloc_page(GFP_KERNEL);
1945 if (!hsave_page)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001946 goto free_page3;
1947
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001948 err = avic_init_vcpu(svm);
1949 if (err)
1950 goto free_page4;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001951
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001952 /* We initialize this flag to true to make sure that the is_running
1953	 * bit will be set the first time the vcpu is loaded.
1954 */
1955 svm->avic_is_running = true;
1956
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001957 svm->nested.hsave = page_address(hsave_page);
Alexander Grafb286d5d2008-11-25 20:17:05 +01001958
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001959 svm->msrpm = page_address(msrpm_pages);
1960 svm_vcpu_init_msrpm(svm->msrpm);
1961
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001962 svm->nested.msrpm = page_address(nested_msrpm_pages);
Joerg Roedel323c3d82010-03-01 15:34:37 +01001963 svm_vcpu_init_msrpm(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01001964
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001965 svm->vmcb = page_address(page);
1966 clear_page(svm->vmcb);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001967 svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001968 svm->asid_generation = 0;
Paolo Bonzini56908912015-10-19 11:30:19 +02001969 init_vmcb(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001970
Boris Ostrovsky2b036c62012-01-09 14:00:35 -05001971 svm_init_osvw(&svm->vcpu);
1972
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001973 return &svm->vcpu;
Avi Kivity36241b82006-12-22 01:05:20 -08001974
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001975free_page4:
1976 __free_page(hsave_page);
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001977free_page3:
1978 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1979free_page2:
1980 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1981free_page1:
1982 __free_page(page);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001983uninit:
1984 kvm_vcpu_uninit(&svm->vcpu);
1985free_svm:
Rusty Russella4770342007-08-01 14:46:11 +10001986 kmem_cache_free(kvm_vcpu_cache, svm);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001987out:
1988 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001989}
1990
1991static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1992{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001993 struct vcpu_svm *svm = to_svm(vcpu);
1994
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001995 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +01001996 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001997 __free_page(virt_to_page(svm->nested.hsave));
1998 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001999 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10002000 kmem_cache_free(kvm_vcpu_cache, svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002001}
2002
Avi Kivity15ad7142007-07-11 18:17:21 +03002003static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002004{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002005 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03002006 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02002007
Avi Kivity0cc50642007-03-25 12:07:27 +02002008 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03002009 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002010 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02002011 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002012
Avi Kivity82ca2d12010-10-21 12:20:34 +02002013#ifdef CONFIG_X86_64
2014 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
2015#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02002016 savesegment(fs, svm->host.fs);
2017 savesegment(gs, svm->host.gs);
2018 svm->host.ldt = kvm_read_ldt();
2019
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002020 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002021 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01002022
Haozhong Zhangad7218832015-10-20 15:39:02 +08002023 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
2024 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2025 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
2026 __this_cpu_write(current_tsc_ratio, tsc_ratio);
2027 wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
2028 }
Joerg Roedelfbc0db72011-03-25 09:44:46 +01002029 }
Paolo Bonzini46896c72015-11-12 14:49:16 +01002030 /* This assumes that the kernel never uses MSR_TSC_AUX */
2031 if (static_cpu_has(X86_FEATURE_RDTSCP))
2032 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002033
2034 avic_vcpu_load(vcpu, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002035}
2036
2037static void svm_vcpu_put(struct kvm_vcpu *vcpu)
2038{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002039 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002040 int i;
2041
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002042 avic_vcpu_put(vcpu);
2043
Avi Kivitye1beb1d2007-11-18 13:50:24 +02002044 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02002045 kvm_load_ldt(svm->host.ldt);
2046#ifdef CONFIG_X86_64
2047 loadsegment(fs, svm->host.fs);
Andy Lutomirski296f7812016-04-26 12:23:29 -07002048 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01002049 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02002050#else
Avi Kivity831ca602011-03-08 16:09:51 +02002051#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02002052 loadsegment(gs, svm->host.gs);
2053#endif
Avi Kivity831ca602011-03-08 16:09:51 +02002054#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002055 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002056 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002057}
2058
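/*
 * While a vCPU blocks in the kernel, clear its AVIC IS_RUNNING bit so
 * that interrupts destined for it are signalled through the IOMMU GA
 * log (which wakes the vCPU) rather than posted to a doorbell nobody
 * is watching.
 */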
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002059static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2060{
2061 avic_set_running(vcpu, false);
2062}
2063
2064static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2065{
2066 avic_set_running(vcpu, true);
2067}
2068
Avi Kivity6aa8b732006-12-10 02:21:36 -08002069static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2070{
Ladi Prosek9b611742017-06-21 09:06:59 +02002071 struct vcpu_svm *svm = to_svm(vcpu);
2072 unsigned long rflags = svm->vmcb->save.rflags;
2073
2074 if (svm->nmi_singlestep) {
2075 /* Hide our flags if they were not set by the guest */
2076 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2077 rflags &= ~X86_EFLAGS_TF;
2078 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2079 rflags &= ~X86_EFLAGS_RF;
2080 }
2081 return rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002082}
2083
2084static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2085{
Ladi Prosek9b611742017-06-21 09:06:59 +02002086 if (to_svm(vcpu)->nmi_singlestep)
2087 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2088
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002089 /*
Andrea Gelminibb3541f2016-05-21 14:14:44 +02002090 * Any change of EFLAGS.VM is accompanied by a reload of SS
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002091 * (caused by either a task switch or an inter-privilege IRET),
2092 * so we do not need to update the CPL here.
2093 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002094 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002095}
2096
Avi Kivity6de4f3a2009-05-31 22:58:47 +03002097static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2098{
2099 switch (reg) {
2100 case VCPU_EXREG_PDPTR:
2101 BUG_ON(!npt_enabled);
Avi Kivity9f8fe502010-12-05 17:30:00 +02002102 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03002103 break;
2104 default:
2105 BUG();
2106 }
2107}
2108
Alexander Graff0b85052008-11-25 20:17:01 +01002109static void svm_set_vintr(struct vcpu_svm *svm)
2110{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002111 set_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01002112}
2113
2114static void svm_clear_vintr(struct vcpu_svm *svm)
2115{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002116 clr_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01002117}
2118
Avi Kivity6aa8b732006-12-10 02:21:36 -08002119static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2120{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002121 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002122
2123 switch (seg) {
2124 case VCPU_SREG_CS: return &save->cs;
2125 case VCPU_SREG_DS: return &save->ds;
2126 case VCPU_SREG_ES: return &save->es;
2127 case VCPU_SREG_FS: return &save->fs;
2128 case VCPU_SREG_GS: return &save->gs;
2129 case VCPU_SREG_SS: return &save->ss;
2130 case VCPU_SREG_TR: return &save->tr;
2131 case VCPU_SREG_LDTR: return &save->ldtr;
2132 }
2133 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00002134 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002135}
2136
2137static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2138{
2139 struct vmcb_seg *s = svm_seg(vcpu, seg);
2140
2141 return s->base;
2142}
2143
2144static void svm_get_segment(struct kvm_vcpu *vcpu,
2145 struct kvm_segment *var, int seg)
2146{
2147 struct vmcb_seg *s = svm_seg(vcpu, seg);
2148
2149 var->base = s->base;
2150 var->limit = s->limit;
2151 var->selector = s->selector;
2152 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2153 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2154 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2155 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2156 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2157 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2158 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
Jim Mattson80112c82014-07-08 09:47:41 +05302159
2160 /*
2161 * AMD CPUs circa 2014 track the G bit for all segments except CS.
2162 * However, the SVM spec states that the G bit is not observed by the
2163 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2164 * So let's synthesize a legal G bit for all segments, this helps
2165 * running KVM nested. It also helps cross-vendor migration, because
2166 * Intel's vmentry has a check on the 'G' bit.
2167 */
2168 var->g = s->limit > 0xfffff;
Amit Shah25022ac2008-10-27 09:04:17 +00002169
Joerg Roedele0231712010-02-24 18:59:10 +01002170 /*
2171 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02002172	 * for cross-vendor migration purposes by mapping it to "not present".
2173 */
Gioh Kim8eae9572017-05-30 15:24:45 +02002174 var->unusable = !var->present;
Andre Przywara19bca6a2009-04-28 12:45:30 +02002175
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002176 switch (seg) {
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002177 case VCPU_SREG_TR:
2178 /*
2179 * Work around a bug where the busy flag in the tr selector
2180 * isn't exposed
2181 */
Amit Shahc0d09822008-10-27 09:04:18 +00002182 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002183 break;
2184 case VCPU_SREG_DS:
2185 case VCPU_SREG_ES:
2186 case VCPU_SREG_FS:
2187 case VCPU_SREG_GS:
2188 /*
2189 * The accessed bit must always be set in the segment
2190 * descriptor cache, although it can be cleared in the
2191 * descriptor, the cached bit always remains at 1. Since
2192 * Intel has a check on this, set it here to support
2193 * cross-vendor migration.
2194 */
2195 if (!var->unusable)
2196 var->type |= 0x1;
2197 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02002198 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01002199 /*
2200 * On AMD CPUs sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02002201 * descriptor is left as 1, although the whole segment has
2202 * been made unusable. Clear it here to pass an Intel VMX
2203 * entry check when cross vendor migrating.
2204 */
2205 if (var->unusable)
2206 var->db = 0;
Roman Pend9c1b542017-06-01 10:55:03 +02002207 /* This is symmetric with svm_set_segment() */
Jan Kiszka33b458d2014-06-29 17:12:43 +02002208 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
Andre Przywarab586eb02009-04-28 12:45:43 +02002209 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002210 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08002211}
2212
Izik Eidus2e4d2652008-03-24 19:38:34 +02002213static int svm_get_cpl(struct kvm_vcpu *vcpu)
2214{
2215 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2216
2217 return save->cpl;
2218}
2219
Gleb Natapov89a27f42010-02-16 10:51:48 +02002220static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002221{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002222 struct vcpu_svm *svm = to_svm(vcpu);
2223
Gleb Natapov89a27f42010-02-16 10:51:48 +02002224 dt->size = svm->vmcb->save.idtr.limit;
2225 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002226}
2227
Gleb Natapov89a27f42010-02-16 10:51:48 +02002228static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002229{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002230 struct vcpu_svm *svm = to_svm(vcpu);
2231
Gleb Natapov89a27f42010-02-16 10:51:48 +02002232 svm->vmcb->save.idtr.limit = dt->size;
2233 svm->vmcb->save.idtr.base = dt->address ;
Joerg Roedel17a703c2010-12-03 11:45:56 +01002234 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002235}
2236
Gleb Natapov89a27f42010-02-16 10:51:48 +02002237static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002238{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002239 struct vcpu_svm *svm = to_svm(vcpu);
2240
Gleb Natapov89a27f42010-02-16 10:51:48 +02002241 dt->size = svm->vmcb->save.gdtr.limit;
2242 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002243}
2244
Gleb Natapov89a27f42010-02-16 10:51:48 +02002245static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002246{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002247 struct vcpu_svm *svm = to_svm(vcpu);
2248
Gleb Natapov89a27f42010-02-16 10:51:48 +02002249 svm->vmcb->save.gdtr.limit = dt->size;
2250	svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01002251 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002252}
2253
Avi Kivitye8467fd2009-12-29 18:43:06 +02002254static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2255{
2256}
2257
Avi Kivityaff48ba2010-12-05 18:56:11 +02002258static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2259{
2260}
2261
Anthony Liguori25c4c272007-04-27 09:29:21 +03002262static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08002263{
2264}
2265
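/*
 * When the guest-visible CR0 matches the shadowed hardware value, the
 * CR0 read/write intercepts can be dropped; the selective CR0-write
 * intercept remains armed to catch changes to the bits KVM cares about.
 */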
Avi Kivityd2251572010-01-06 10:55:27 +02002266static void update_cr0_intercept(struct vcpu_svm *svm)
2267{
2268 ulong gcr0 = svm->vcpu.arch.cr0;
2269 u64 *hcr0 = &svm->vmcb->save.cr0;
2270
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002271 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2272 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
Avi Kivityd2251572010-01-06 10:55:27 +02002273
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002274 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02002275
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002276 if (gcr0 == *hcr0) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002277 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2278 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02002279 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002280 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2281 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02002282 }
2283}
2284
Avi Kivity6aa8b732006-12-10 02:21:36 -08002285static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2286{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002287 struct vcpu_svm *svm = to_svm(vcpu);
2288
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002289#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02002290 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10002291 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02002292 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06002293 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002294 }
2295
Mike Dayd77c26f2007-10-08 09:02:08 -04002296 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02002297 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06002298 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002299 }
2300 }
2301#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002302 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02002303
2304 if (!npt_enabled)
2305 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02002306
Paolo Bonzinibcf166a2015-10-01 13:19:55 +02002307 /*
2308	 * Re-enable caching here because the QEMU BIOS
2309	 * does not do it; otherwise there is some delay
2310	 * at reboot.
2311 */
2312 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2313 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002314 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002315 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02002316 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002317}
2318
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002319static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002320{
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07002321 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02002322 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2323
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002324 if (cr4 & X86_CR4_VMXE)
2325 return 1;
2326
Joerg Roedele5eab0c2008-09-09 19:11:51 +02002327 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002328 svm_flush_tlb(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02002329
Joerg Roedelec077262008-04-09 14:15:28 +02002330 vcpu->arch.cr4 = cr4;
2331 if (!npt_enabled)
2332 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02002333 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02002334 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002335 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002336 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002337}
2338
2339static void svm_set_segment(struct kvm_vcpu *vcpu,
2340 struct kvm_segment *var, int seg)
2341{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002342 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002343 struct vmcb_seg *s = svm_seg(vcpu, seg);
2344
2345 s->base = var->base;
2346 s->limit = var->limit;
2347 s->selector = var->selector;
Roman Pend9c1b542017-06-01 10:55:03 +02002348 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2349 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2350 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2351 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2352 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2353 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2354 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2355 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002356
2357 /*
2358 * This is always accurate, except if SYSRET returned to a segment
2359 * with SS.DPL != 3. Intel does not have this quirk, and always
2360 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2361 * would entail passing the CPL to userspace and back.
2362 */
2363 if (seg == VCPU_SREG_SS)
Roman Pend9c1b542017-06-01 10:55:03 +02002364 /* This is symmetric with svm_get_segment() */
2365 svm->vmcb->save.cpl = (var->dpl & 3);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002366
Joerg Roedel060d0c92010-12-03 11:45:57 +01002367 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002368}
2369
Paolo Bonzinicbdb9672015-11-10 09:14:39 +01002370static void update_bp_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002371{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002372 struct vcpu_svm *svm = to_svm(vcpu);
2373
Joerg Roedel18c918c2010-11-30 18:03:59 +01002374 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03002375
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002376 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002377 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01002378 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002379 } else
2380 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03002381}
2382
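/*
 * Hand out ASIDs from a per-CPU pool. When the pool is exhausted,
 * start a new generation at min_asid (the first ASID not reserved for
 * SEV guests) and request a full TLB flush on the next VMRUN.
 */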
Tejun Heo0fe1e002009-10-29 22:34:14 +09002383static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002384{
Tejun Heo0fe1e002009-10-29 22:34:14 +09002385 if (sd->next_asid > sd->max_asid) {
2386 ++sd->asid_generation;
Brijesh Singh4faefff2017-12-04 10:57:25 -06002387 sd->next_asid = sd->min_asid;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002388 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002389 }
2390
Tejun Heo0fe1e002009-10-29 22:34:14 +09002391 svm->asid_generation = sd->asid_generation;
2392 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01002393
2394 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002395}
2396
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01002397static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2398{
2399 return to_svm(vcpu)->vmcb->save.dr6;
2400}
2401
2402static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2403{
2404 struct vcpu_svm *svm = to_svm(vcpu);
2405
2406 svm->vmcb->save.dr6 = value;
2407 mark_dirty(svm->vmcb, VMCB_DR);
2408}
2409
Paolo Bonzinifacb0132014-02-21 10:32:27 +01002410static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2411{
2412 struct vcpu_svm *svm = to_svm(vcpu);
2413
2414 get_debugreg(vcpu->arch.db[0], 0);
2415 get_debugreg(vcpu->arch.db[1], 1);
2416 get_debugreg(vcpu->arch.db[2], 2);
2417 get_debugreg(vcpu->arch.db[3], 3);
2418 vcpu->arch.dr6 = svm_get_dr6(vcpu);
2419 vcpu->arch.dr7 = svm->vmcb->save.dr7;
2420
2421 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2422 set_dr_intercepts(svm);
2423}
2424
Gleb Natapov020df072010-04-13 10:05:23 +03002425static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002426{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01002427 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01002428
Gleb Natapov020df072010-04-13 10:05:23 +03002429 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01002430 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002431}
2432
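/*
 * #PF and #NPF intercepts: exit_info_2 holds the faulting address
 * (CR2 for #PF, the guest-physical address for #NPF) with the SME
 * C-bit stripped, and exit_info_1 holds the error code. With decode
 * assists the CPU also provides the faulting instruction bytes,
 * sparing the emulator a guest fetch.
 */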
Avi Kivity851ba692009-08-24 11:10:17 +03002433static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002434{
Brijesh Singh0ede79e2017-12-04 10:57:39 -06002435 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002436 u64 error_code = svm->vmcb->control.exit_info_1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002437
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002438 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
Brijesh Singh00b10fe2017-12-04 10:57:40 -06002439 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2440 svm->vmcb->control.insn_bytes : NULL,
Paolo Bonzinid0006532017-08-11 18:36:43 +02002441 svm->vmcb->control.insn_len);
2442}
2443
2444static int npf_interception(struct vcpu_svm *svm)
2445{
Brijesh Singh0ede79e2017-12-04 10:57:39 -06002446 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
Paolo Bonzinid0006532017-08-11 18:36:43 +02002447 u64 error_code = svm->vmcb->control.exit_info_1;
2448
2449 trace_kvm_page_fault(fault_address, error_code);
2450 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
Brijesh Singh00b10fe2017-12-04 10:57:40 -06002451 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2452 svm->vmcb->control.insn_bytes : NULL,
Paolo Bonzinid0006532017-08-11 18:36:43 +02002453 svm->vmcb->control.insn_len);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002454}
2455
Avi Kivity851ba692009-08-24 11:10:17 +03002456static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002457{
Avi Kivity851ba692009-08-24 11:10:17 +03002458 struct kvm_run *kvm_run = svm->vcpu.run;
2459
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002460 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03002461 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02002462 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002463 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2464 return 1;
2465 }
Gleb Natapov44c11432009-05-11 13:35:52 +03002466
Jan Kiszka6be7d302009-10-18 13:24:54 +02002467 if (svm->nmi_singlestep) {
Ladi Prosek4aebd0e2017-06-21 09:06:57 +02002468 disable_nmi_singlestep(svm);
Gleb Natapov44c11432009-05-11 13:35:52 +03002469 }
2470
2471 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01002472 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03002473 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2474 kvm_run->debug.arch.pc =
2475 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2476 kvm_run->debug.arch.exception = DB_VECTOR;
2477 return 0;
2478 }
2479
2480 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002481}
2482
Avi Kivity851ba692009-08-24 11:10:17 +03002483static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002484{
Avi Kivity851ba692009-08-24 11:10:17 +03002485 struct kvm_run *kvm_run = svm->vcpu.run;
2486
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002487 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2488 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2489 kvm_run->debug.arch.exception = BP_VECTOR;
2490 return 0;
2491}
2492
Avi Kivity851ba692009-08-24 11:10:17 +03002493static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002494{
2495 int er;
2496
Andre Przywara51d8b662010-12-21 11:12:02 +01002497 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002498 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02002499 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002500 return 1;
2501}
2502
Eric Northup54a20552015-11-03 18:03:53 +01002503static int ac_interception(struct vcpu_svm *svm)
2504{
2505 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2506 return 1;
2507}
2508
Joerg Roedel67ec6602010-05-17 14:43:35 +02002509static bool is_erratum_383(void)
2510{
2511 int err, i;
2512 u64 value;
2513
2514 if (!erratum_383_found)
2515 return false;
2516
2517 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2518 if (err)
2519 return false;
2520
2521 /* Bit 62 may or may not be set for this mce */
2522 value &= ~(1ULL << 62);
2523
2524 if (value != 0xb600000000010015ULL)
2525 return false;
2526
2527 /* Clear MCi_STATUS registers */
2528 for (i = 0; i < 6; ++i)
2529 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2530
2531 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2532 if (!err) {
2533 u32 low, high;
2534
2535 value &= ~(1ULL << 2);
2536 low = lower_32_bits(value);
2537 high = upper_32_bits(value);
2538
2539 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2540 }
2541
2542 /* Flush tlb to evict multi-match entries */
2543 __flush_tlb_all();
2544
2545 return true;
2546}
2547
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002548static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02002549{
Joerg Roedel67ec6602010-05-17 14:43:35 +02002550 if (is_erratum_383()) {
2551 /*
2552 * Erratum 383 triggered. Guest state is corrupt so kill the
2553 * guest.
2554 */
2555 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2556
Avi Kivitya8eeb042010-05-10 12:34:53 +03002557 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02002558
2559 return;
2560 }
2561
Joerg Roedel53371b52008-04-09 14:15:30 +02002562 /*
2563 * On an #MC intercept the MCE handler is not called automatically in
2564 * the host. So do it by hand here.
2565 */
2566 asm volatile (
2567 "int $0x12\n");
2568 /* not sure if we ever come back to this point */
2569
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002570 return;
2571}
2572
2573static int mc_interception(struct vcpu_svm *svm)
2574{
Joerg Roedel53371b52008-04-09 14:15:30 +02002575 return 1;
2576}
2577
Avi Kivity851ba692009-08-24 11:10:17 +03002578static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002579{
Avi Kivity851ba692009-08-24 11:10:17 +03002580 struct kvm_run *kvm_run = svm->vcpu.run;
2581
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002582 /*
2583 * VMCB is undefined after a SHUTDOWN intercept
2584 * so reinitialize it.
2585 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002586 clear_page(svm->vmcb);
Paolo Bonzini56908912015-10-19 11:30:19 +02002587 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002588
2589 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2590 return 0;
2591}
2592
Avi Kivity851ba692009-08-24 11:10:17 +03002593static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002594{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002595 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04002596 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002597 int size, in, string, ret;
Avi Kivity039576c2007-03-20 12:46:50 +02002598 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002599
Rusty Russelle756fc62007-07-30 20:07:08 +10002600 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03002601 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02002602 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Tom Lendacky8370c3d2016-11-23 12:01:50 -05002603 if (string)
Andre Przywara51d8b662010-12-21 11:12:02 +01002604 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002605
Avi Kivity039576c2007-03-20 12:46:50 +02002606 port = io_info >> 16;
2607 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002608 svm->next_rip = svm->vmcb->control.exit_info_2;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002609 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002610
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002611 /*
2612 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
2613 * KVM_EXIT_DEBUG here.
2614 */
2615 if (in)
2616 return kvm_fast_pio_in(vcpu, size, port) && ret;
2617 else
2618 return kvm_fast_pio_out(vcpu, size, port) && ret;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002619}
2620
Avi Kivity851ba692009-08-24 11:10:17 +03002621static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02002622{
2623 return 1;
2624}
2625
Avi Kivity851ba692009-08-24 11:10:17 +03002626static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02002627{
2628 ++svm->vcpu.stat.irq_exits;
2629 return 1;
2630}
2631
Avi Kivity851ba692009-08-24 11:10:17 +03002632static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002633{
2634 return 1;
2635}
2636
Avi Kivity851ba692009-08-24 11:10:17 +03002637static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002638{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002639 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10002640 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002641}
2642
Avi Kivity851ba692009-08-24 11:10:17 +03002643static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02002644{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002645 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Andrey Smetanin0d9c0552016-02-11 16:44:59 +03002646 return kvm_emulate_hypercall(&svm->vcpu);
Avi Kivity02e235b2007-02-19 14:37:47 +02002647}
2648
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002649static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2650{
2651 struct vcpu_svm *svm = to_svm(vcpu);
2652
2653 return svm->nested.nested_cr3;
2654}
2655
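/*
 * Read one PDPTE from the L1 hypervisor's nested CR3; used when
 * shadowing a 32-bit PAE guest page table. A failed read is reported
 * as a zero (not-present) entry.
 */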
Avi Kivitye4e517b2011-07-28 11:36:17 +03002656static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2657{
2658 struct vcpu_svm *svm = to_svm(vcpu);
2659 u64 cr3 = svm->nested.nested_cr3;
2660 u64 pdpte;
2661 int ret;
2662
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002663 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002664 offset_in_page(cr3) + index * 8, 8);
Avi Kivitye4e517b2011-07-28 11:36:17 +03002665 if (ret)
2666 return 0;
2667 return pdpte;
2668}
2669
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002670static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2671 unsigned long root)
2672{
2673 struct vcpu_svm *svm = to_svm(vcpu);
2674
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002675 svm->vmcb->control.nested_cr3 = __sme_set(root);
Joerg Roedelb2747162010-12-03 11:45:53 +01002676 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002677 svm_flush_tlb(vcpu);
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002678}
2679
Avi Kivity6389ee92010-11-29 16:12:30 +02002680static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2681 struct x86_exception *fault)
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002682{
2683 struct vcpu_svm *svm = to_svm(vcpu);
2684
Paolo Bonzini5e352512014-09-02 13:18:37 +02002685 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2686 /*
2687 * TODO: track the cause of the nested page fault, and
2688 * correctly fill in the high bits of exit_info_1.
2689 */
2690 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2691 svm->vmcb->control.exit_code_hi = 0;
2692 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2693 svm->vmcb->control.exit_info_2 = fault->address;
2694 }
2695
2696 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2697 svm->vmcb->control.exit_info_1 |= fault->error_code;
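 /*
  * At this point exit_info_1[31:0] carries a #PF-style error code for
  * the nested hypervisor, while exit_info_1[63:32] describes the nested
  * fault itself (in the synthetic case above only bit 32 is set as a
  * placeholder, see the TODO); exit_info_2 holds the faulting
  * guest-physical address.
  */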
2698
2699 /*
2700 * The present bit is always zero for page structure faults on real
2701 * hardware.
2702 */
2703 if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2704 svm->vmcb->control.exit_info_1 &= ~1;
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002705
2706 nested_svm_vmexit(svm);
2707}
2708
Paolo Bonzini8a3c1a332013-10-02 16:56:13 +02002709static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
Joerg Roedel4b161842010-09-10 17:31:03 +02002710{
Paolo Bonziniad896af2013-10-02 16:56:14 +02002711 WARN_ON(mmu_is_nested(vcpu));
2712 kvm_init_shadow_mmu(vcpu);
Joerg Roedel4b161842010-09-10 17:31:03 +02002713 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
2714 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
Avi Kivitye4e517b2011-07-28 11:36:17 +03002715 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
Joerg Roedel4b161842010-09-10 17:31:03 +02002716 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
Yu Zhang855feb62017-08-24 20:27:55 +08002717 vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
Xiao Guangrongc258b622015-08-05 12:04:24 +08002718 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
Joerg Roedel4b161842010-09-10 17:31:03 +02002719 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
Joerg Roedel4b161842010-09-10 17:31:03 +02002720}
2721
2722static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2723{
2724 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
2725}
2726
Alexander Grafc0725422008-11-25 20:17:03 +01002727static int nested_svm_check_permissions(struct vcpu_svm *svm)
2728{
Dan Carpentere9196ce2017-05-18 10:39:53 +03002729 if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2730 !is_paging(&svm->vcpu)) {
Alexander Grafc0725422008-11-25 20:17:03 +01002731 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2732 return 1;
2733 }
2734
2735 if (svm->vmcb->save.cpl) {
2736 kvm_inject_gp(&svm->vcpu, 0);
2737 return 1;
2738 }
2739
Dan Carpentere9196ce2017-05-18 10:39:53 +03002740 return 0;
Alexander Grafc0725422008-11-25 20:17:03 +01002741}
2742
Alexander Grafcf74a782008-11-25 20:17:08 +01002743static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2744 bool has_error_code, u32 error_code)
2745{
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002746 int vmexit;
2747
Joerg Roedel20307532010-11-29 17:51:48 +01002748 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel0295ad72009-08-07 11:49:37 +02002749 return 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002750
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002751 vmexit = nested_svm_intercept(svm);
2752 if (vmexit != NESTED_EXIT_DONE)
2753 return 0;
2754
Joerg Roedel0295ad72009-08-07 11:49:37 +02002755 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2756 svm->vmcb->control.exit_code_hi = 0;
2757 svm->vmcb->control.exit_info_1 = error_code;
Paolo Bonzinib96fb432017-07-27 12:29:32 +02002758
2759 /*
2760 * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
2761 * The fix is to add the ancillary datum (CR2 or DR6) to structs
2762 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
2763 * written only when inject_pending_event runs (DR6 would written here
2764 * too). This should be conditional on a new capability---if the
2765 * capability is disabled, kvm_multiple_exception would write the
2766 * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
2767 */
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002768 if (svm->vcpu.arch.exception.nested_apf)
2769 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
2770 else
2771 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
Joerg Roedel0295ad72009-08-07 11:49:37 +02002772
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002773 svm->nested.exit_required = true;
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002774 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002775}
2776
Joerg Roedel8fe54652010-02-19 16:23:01 +01002777/* This function returns true if it is safe to enable the irq window */
2778static inline bool nested_svm_intr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002779{
Joerg Roedel20307532010-11-29 17:51:48 +01002780 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002781 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002782
Joerg Roedel26666952009-08-07 11:49:46 +02002783 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002784 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002785
Joerg Roedel26666952009-08-07 11:49:46 +02002786 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002787 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002788
Gleb Natapova0a07cd2010-09-20 10:15:32 +02002789 /*
2790 * if vmexit was already requested (by intercepted exception
2791 * for instance) do not overwrite it with "external interrupt"
2792 * vmexit.
2793 */
2794 if (svm->nested.exit_required)
2795 return false;
2796
Joerg Roedel197717d2010-02-24 18:59:19 +01002797 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2798 svm->vmcb->control.exit_info_1 = 0;
2799 svm->vmcb->control.exit_info_2 = 0;
Joerg Roedel26666952009-08-07 11:49:46 +02002800
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002801 if (svm->nested.intercept & 1ULL) {
2802 /*
2803 * The #vmexit can't be emulated here directly because this
Guo Chaoc5ec2e52012-06-28 15:16:43 +08002804 * code path runs with irqs and preemption disabled. A
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002805 * #vmexit emulation might sleep. Only signal request for
2806 * the #vmexit here.
2807 */
2808 svm->nested.exit_required = true;
Joerg Roedel236649d2009-10-09 16:08:30 +02002809 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
Joerg Roedel8fe54652010-02-19 16:23:01 +01002810 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002811 }
2812
Joerg Roedel8fe54652010-02-19 16:23:01 +01002813 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002814}
2815
Joerg Roedel887f5002010-02-24 18:59:12 +01002816/* This function returns true if it is safe to enable the nmi window */
2817static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2818{
Joerg Roedel20307532010-11-29 17:51:48 +01002819 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel887f5002010-02-24 18:59:12 +01002820 return true;
2821
2822 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2823 return true;
2824
2825 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2826 svm->nested.exit_required = true;
2827
2828 return false;
2829}
2830
Joerg Roedel7597f122010-02-19 16:23:00 +01002831static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002832{
2833 struct page *page;
2834
Joerg Roedel6c3bd3d2010-02-19 16:23:04 +01002835 might_sleep();
2836
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002837 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002838 if (is_error_page(page))
2839 goto error;
2840
Joerg Roedel7597f122010-02-19 16:23:00 +01002841 *_page = page;
2842
2843 return kmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002844
2845error:
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002846 kvm_inject_gp(&svm->vcpu, 0);
2847
2848 return NULL;
2849}
2850
Joerg Roedel7597f122010-02-19 16:23:00 +01002851static void nested_svm_unmap(struct page *page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002852{
Joerg Roedel7597f122010-02-19 16:23:00 +01002853 kunmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002854 kvm_release_page_dirty(page);
2855}
2856
Joerg Roedelce2ac082010-03-01 15:34:39 +01002857static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002858{
Jan Kiszka9bf41832014-06-30 10:54:17 +02002859 unsigned port, size, iopm_len;
2860 u16 val, mask;
2861 u8 start_bit;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002862 u64 gpa;
2863
2864 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2865 return NESTED_EXIT_HOST;
2866
2867 port = svm->vmcb->control.exit_info_1 >> 16;
Jan Kiszka9bf41832014-06-30 10:54:17 +02002868 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
2869 SVM_IOIO_SIZE_SHIFT;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002870 gpa = svm->nested.vmcb_iopm + (port / 8);
Jan Kiszka9bf41832014-06-30 10:54:17 +02002871 start_bit = port % 8;
2872 iopm_len = (start_bit + size > 8) ? 2 : 1;
2873 mask = (0xf >> (4 - size)) << start_bit;
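 /*
  * Worked example, assuming the usual IOPM layout of one intercept bit
  * per I/O port: port 0x3f7 with size 2 gives gpa = vmcb_iopm + 0x7e
  * and start_bit = 7, so the access straddles a byte boundary
  * (iopm_len = 2) and mask = 0x3 << 7 tests the permission bits for
  * ports 0x3f7 and 0x3f8.
  */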
2874 val = 0;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002875
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002876 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
Jan Kiszka9bf41832014-06-30 10:54:17 +02002877 return NESTED_EXIT_DONE;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002878
Jan Kiszka9bf41832014-06-30 10:54:17 +02002879 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002880}
2881
Joerg Roedeld2477822010-03-01 15:34:34 +01002882static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002883{
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002884 u32 offset, msr, value;
2885 int write, mask;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002886
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002887 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
Joerg Roedeld2477822010-03-01 15:34:34 +01002888 return NESTED_EXIT_HOST;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002889
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002890 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2891 offset = svm_msrpm_offset(msr);
2892 write = svm->vmcb->control.exit_info_1 & 1;
2893 mask = 1 << ((2 * (msr & 0xf)) + write);
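 /*
  * Sketch of the MSRPM encoding assumed here: each MSR owns two
  * adjacent bits (read, then write) and each 32-bit word covers 16
  * MSRs. For a write to MSR_STAR (0xc0000081), msr & 0xf == 1, so
  * mask = 1 << (2 * 1 + 1) == 0x8 selects the write-intercept bit
  * inside the word that svm_msrpm_offset() locates.
  */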
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002894
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002895 if (offset == MSR_INVALID)
2896 return NESTED_EXIT_DONE;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002897
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002898 /* Offset is in 32 bit units but we need it in 8 bit units */
2899 offset *= 4;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002900
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002901 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002902 return NESTED_EXIT_DONE;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002903
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002904 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002905}
2906
Ladi Prosekab2f4d732017-06-21 09:06:58 +02002907/* DB exceptions for our internal use must not cause vmexit */
2908static int nested_svm_intercept_db(struct vcpu_svm *svm)
2909{
2910 unsigned long dr6;
2911
2912 /* if we're not singlestepping, it's not ours */
2913 if (!svm->nmi_singlestep)
2914 return NESTED_EXIT_DONE;
2915
2916 /* if it's not a singlestep exception, it's not ours */
2917 if (kvm_get_dr(&svm->vcpu, 6, &dr6))
2918 return NESTED_EXIT_DONE;
2919 if (!(dr6 & DR6_BS))
2920 return NESTED_EXIT_DONE;
2921
2922 /* if the guest is singlestepping, it should get the vmexit */
2923 if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
2924 disable_nmi_singlestep(svm);
2925 return NESTED_EXIT_DONE;
2926 }
2927
2928 /* it's ours, the nested hypervisor must not see this one */
2929 return NESTED_EXIT_HOST;
2930}
2931
Joerg Roedel410e4d52009-08-07 11:49:44 +02002932static int nested_svm_exit_special(struct vcpu_svm *svm)
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002933{
Alexander Grafcf74a782008-11-25 20:17:08 +01002934 u32 exit_code = svm->vmcb->control.exit_code;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002935
Joerg Roedel410e4d52009-08-07 11:49:44 +02002936 switch (exit_code) {
2937 case SVM_EXIT_INTR:
2938 case SVM_EXIT_NMI:
Joerg Roedelff47a492010-04-22 12:33:14 +02002939 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
Joerg Roedel410e4d52009-08-07 11:49:44 +02002940 return NESTED_EXIT_HOST;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002941 case SVM_EXIT_NPF:
Joerg Roedele0231712010-02-24 18:59:10 +01002942 /* For now we always handle NPFs ourselves when NPT is in use */
Joerg Roedel410e4d52009-08-07 11:49:44 +02002943 if (npt_enabled)
2944 return NESTED_EXIT_HOST;
2945 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002946 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
Gleb Natapov631bc482010-10-14 11:22:52 +02002947 /* When we're shadowing, trap PFs, but not async PF */
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002948 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002949 return NESTED_EXIT_HOST;
2950 break;
2951 default:
2952 break;
Alexander Grafcf74a782008-11-25 20:17:08 +01002953 }
2954
Joerg Roedel410e4d52009-08-07 11:49:44 +02002955 return NESTED_EXIT_CONTINUE;
2956}
2957
2958/*
2959 * If this function returns NESTED_EXIT_DONE, the #vmexit must be reflected to the nested hypervisor
2960 */
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002961static int nested_svm_intercept(struct vcpu_svm *svm)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002962{
2963 u32 exit_code = svm->vmcb->control.exit_code;
2964 int vmexit = NESTED_EXIT_HOST;
2965
Alexander Grafcf74a782008-11-25 20:17:08 +01002966 switch (exit_code) {
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002967 case SVM_EXIT_MSR:
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002968 vmexit = nested_svm_exit_handled_msr(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002969 break;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002970 case SVM_EXIT_IOIO:
2971 vmexit = nested_svm_intercept_ioio(svm);
2972 break;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002973 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2974 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2975 if (svm->nested.intercept_cr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002976 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002977 break;
2978 }
Joerg Roedel3aed0412010-11-30 18:03:58 +01002979 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2980 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2981 if (svm->nested.intercept_dr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002982 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002983 break;
2984 }
2985 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2986 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
Ladi Prosekab2f4d732017-06-21 09:06:58 +02002987 if (svm->nested.intercept_exceptions & excp_bits) {
2988 if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
2989 vmexit = nested_svm_intercept_db(svm);
2990 else
2991 vmexit = NESTED_EXIT_DONE;
2992 }
Gleb Natapov631bc482010-10-14 11:22:52 +02002993 /* an async page fault always causes a vmexit */
2994 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002995 svm->vcpu.arch.exception.nested_apf != 0)
Gleb Natapov631bc482010-10-14 11:22:52 +02002996 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002997 break;
2998 }
Joerg Roedel228070b2010-04-22 12:33:10 +02002999 case SVM_EXIT_ERR: {
3000 vmexit = NESTED_EXIT_DONE;
3001 break;
3002 }
Alexander Grafcf74a782008-11-25 20:17:08 +01003003 default: {
3004 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
Joerg Roedelaad42c62009-08-07 11:49:34 +02003005 if (svm->nested.intercept & exit_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02003006 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01003007 }
3008 }
3009
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01003010 return vmexit;
3011}
3012
3013static int nested_svm_exit_handled(struct vcpu_svm *svm)
3014{
3015 int vmexit;
3016
3017 vmexit = nested_svm_intercept(svm);
3018
3019 if (vmexit == NESTED_EXIT_DONE)
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02003020 nested_svm_vmexit(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02003021
3022 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01003023}
3024
Joerg Roedel0460a972009-08-07 11:49:31 +02003025static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
3026{
3027 struct vmcb_control_area *dst = &dst_vmcb->control;
3028 struct vmcb_control_area *from = &from_vmcb->control;
3029
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003030 dst->intercept_cr = from->intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01003031 dst->intercept_dr = from->intercept_dr;
Joerg Roedel0460a972009-08-07 11:49:31 +02003032 dst->intercept_exceptions = from->intercept_exceptions;
3033 dst->intercept = from->intercept;
3034 dst->iopm_base_pa = from->iopm_base_pa;
3035 dst->msrpm_base_pa = from->msrpm_base_pa;
3036 dst->tsc_offset = from->tsc_offset;
3037 dst->asid = from->asid;
3038 dst->tlb_ctl = from->tlb_ctl;
3039 dst->int_ctl = from->int_ctl;
3040 dst->int_vector = from->int_vector;
3041 dst->int_state = from->int_state;
3042 dst->exit_code = from->exit_code;
3043 dst->exit_code_hi = from->exit_code_hi;
3044 dst->exit_info_1 = from->exit_info_1;
3045 dst->exit_info_2 = from->exit_info_2;
3046 dst->exit_int_info = from->exit_int_info;
3047 dst->exit_int_info_err = from->exit_int_info_err;
3048 dst->nested_ctl = from->nested_ctl;
3049 dst->event_inj = from->event_inj;
3050 dst->event_inj_err = from->event_inj_err;
3051 dst->nested_cr3 = from->nested_cr3;
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003052 dst->virt_ext = from->virt_ext;
Joerg Roedel0460a972009-08-07 11:49:31 +02003053}
3054
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003055static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01003056{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003057 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02003058 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02003059 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003060 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01003061
Joerg Roedel17897f32009-10-09 16:08:29 +02003062 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3063 vmcb->control.exit_info_1,
3064 vmcb->control.exit_info_2,
3065 vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01003066 vmcb->control.exit_int_info_err,
3067 KVM_ISA_SVM);
Joerg Roedel17897f32009-10-09 16:08:29 +02003068
Joerg Roedel7597f122010-02-19 16:23:00 +01003069 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003070 if (!nested_vmcb)
3071 return 1;
3072
Joerg Roedel20307532010-11-29 17:51:48 +01003073 /* Exit Guest-Mode */
3074 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01003075 svm->nested.vmcb = 0;
3076
Alexander Grafcf74a782008-11-25 20:17:08 +01003077 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02003078 disable_gif(svm);
3079
3080 nested_vmcb->save.es = vmcb->save.es;
3081 nested_vmcb->save.cs = vmcb->save.cs;
3082 nested_vmcb->save.ss = vmcb->save.ss;
3083 nested_vmcb->save.ds = vmcb->save.ds;
3084 nested_vmcb->save.gdtr = vmcb->save.gdtr;
3085 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02003086 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01003087 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Avi Kivity9f8fe502010-12-05 17:30:00 +02003088 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02003089 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01003090 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03003091 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02003092 nested_vmcb->save.rip = vmcb->save.rip;
3093 nested_vmcb->save.rsp = vmcb->save.rsp;
3094 nested_vmcb->save.rax = vmcb->save.rax;
3095 nested_vmcb->save.dr7 = vmcb->save.dr7;
3096 nested_vmcb->save.dr6 = vmcb->save.dr6;
3097 nested_vmcb->save.cpl = vmcb->save.cpl;
3098
3099 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
3100 nested_vmcb->control.int_vector = vmcb->control.int_vector;
3101 nested_vmcb->control.int_state = vmcb->control.int_state;
3102 nested_vmcb->control.exit_code = vmcb->control.exit_code;
3103 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
3104 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
3105 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
3106 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
3107 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel6092d3d2015-10-14 15:10:54 +02003108
3109 if (svm->nrips_enabled)
3110 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02003111
3112 /*
3113 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3114 * to make sure that we do not lose injected events. So check event_inj
3115 * here and copy it to exit_int_info if it is valid.
3116 * Exit_int_info and event_inj can't both be valid because the case
3117 * below only happens on a VMRUN instruction intercept which has
3118 * no valid exit_int_info set.
3119 */
3120 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3121 struct vmcb_control_area *nc = &nested_vmcb->control;
3122
3123 nc->exit_int_info = vmcb->control.event_inj;
3124 nc->exit_int_info_err = vmcb->control.event_inj_err;
3125 }
3126
Joerg Roedel33740e42009-08-07 11:49:29 +02003127 nested_vmcb->control.tlb_ctl = 0;
3128 nested_vmcb->control.event_inj = 0;
3129 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01003130
3131 /* We always set V_INTR_MASKING and remember the old value in hflags */
3132 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3133 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3134
Alexander Grafcf74a782008-11-25 20:17:08 +01003135 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02003136 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01003137
Alexander Graf219b65d2009-06-15 15:21:25 +02003138 kvm_clear_exception_queue(&svm->vcpu);
3139 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01003140
Joerg Roedel4b161842010-09-10 17:31:03 +02003141 svm->nested.nested_cr3 = 0;
3142
Alexander Grafcf74a782008-11-25 20:17:08 +01003143 /* Restore selected save entries */
3144 svm->vmcb->save.es = hsave->save.es;
3145 svm->vmcb->save.cs = hsave->save.cs;
3146 svm->vmcb->save.ss = hsave->save.ss;
3147 svm->vmcb->save.ds = hsave->save.ds;
3148 svm->vmcb->save.gdtr = hsave->save.gdtr;
3149 svm->vmcb->save.idtr = hsave->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03003150 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
Alexander Grafcf74a782008-11-25 20:17:08 +01003151 svm_set_efer(&svm->vcpu, hsave->save.efer);
3152 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3153 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3154 if (npt_enabled) {
3155 svm->vmcb->save.cr3 = hsave->save.cr3;
3156 svm->vcpu.arch.cr3 = hsave->save.cr3;
3157 } else {
Avi Kivity23902182010-06-10 17:02:16 +03003158 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01003159 }
3160 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
3161 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
3162 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
3163 svm->vmcb->save.dr7 = 0;
3164 svm->vmcb->save.cpl = 0;
3165 svm->vmcb->control.exit_int_info = 0;
3166
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003167 mark_all_dirty(svm->vmcb);
3168
Joerg Roedel7597f122010-02-19 16:23:00 +01003169 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01003170
Joerg Roedel4b161842010-09-10 17:31:03 +02003171 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01003172 kvm_mmu_reset_context(&svm->vcpu);
3173 kvm_mmu_load(&svm->vcpu);
3174
3175 return 0;
3176}
Alexander Graf3d6368e2008-11-25 20:17:07 +01003177
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003178static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003179{
Joerg Roedel323c3d82010-03-01 15:34:37 +01003180 /*
3181 * This function merges the msr permission bitmaps of kvm and the
Guo Chaoc5ec2e52012-06-28 15:16:43 +08003182 * nested vmcb. It is optimized in that it only merges the parts where
Joerg Roedel323c3d82010-03-01 15:34:37 +01003183 * the kvm msr permission bitmap may contain zero bits
3184 */
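 /*
  * (A set bit in the msrpm means "intercept", so the OR below yields an
  * intercept whenever either KVM or the nested hypervisor wants one;
  * words where KVM's bitmap has no zero bits are skipped, as the result
  * would be all ones anyway.)
  */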
Alexander Graf3d6368e2008-11-25 20:17:07 +01003185 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003186
Joerg Roedel323c3d82010-03-01 15:34:37 +01003187 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3188 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003189
Joerg Roedel323c3d82010-03-01 15:34:37 +01003190 for (i = 0; i < MSRPM_OFFSETS; i++) {
3191 u32 value, p;
3192 u64 offset;
3193
3194 if (msrpm_offsets[i] == 0xffffffff)
3195 break;
3196
Joerg Roedel0d6b3532010-03-01 15:34:38 +01003197 p = msrpm_offsets[i];
3198 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01003199
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02003200 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
Joerg Roedel323c3d82010-03-01 15:34:37 +01003201 return false;
3202
3203 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3204 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003205
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05003206 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
Alexander Graf3d6368e2008-11-25 20:17:07 +01003207
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003208 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003209}
3210
Joerg Roedel52c65a302010-08-02 16:46:44 +02003211static bool nested_vmcb_checks(struct vmcb *vmcb)
3212{
3213 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3214 return false;
3215
Joerg Roedeldbe77582010-08-02 16:46:45 +02003216 if (vmcb->control.asid == 0)
3217 return false;
3218
Tom Lendackycea3a192017-12-04 10:57:24 -06003219 if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3220 !npt_enabled)
Joerg Roedel4b161842010-09-10 17:31:03 +02003221 return false;
3222
Joerg Roedel52c65a302010-08-02 16:46:44 +02003223 return true;
3224}
3225
Ladi Prosekc2634062017-10-11 16:54:44 +02003226static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3227 struct vmcb *nested_vmcb, struct page *page)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003228{
Avi Kivityf6e78472010-08-02 15:30:20 +03003229 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003230 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3231 else
3232 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3233
Tom Lendackycea3a192017-12-04 10:57:24 -06003234 if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
Joerg Roedel4b161842010-09-10 17:31:03 +02003235 kvm_mmu_unload(&svm->vcpu);
3236 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3237 nested_svm_init_mmu_context(&svm->vcpu);
3238 }
3239
Alexander Graf3d6368e2008-11-25 20:17:07 +01003240 /* Load the nested guest state */
3241 svm->vmcb->save.es = nested_vmcb->save.es;
3242 svm->vmcb->save.cs = nested_vmcb->save.cs;
3243 svm->vmcb->save.ss = nested_vmcb->save.ss;
3244 svm->vmcb->save.ds = nested_vmcb->save.ds;
3245 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3246 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03003247 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003248 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3249 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3250 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3251 if (npt_enabled) {
3252 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3253 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01003254 } else
Avi Kivity23902182010-06-10 17:02:16 +03003255 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01003256
3257 /* Guest paging mode is active - reset mmu */
3258 kvm_mmu_reset_context(&svm->vcpu);
3259
Joerg Roedeldefbba52009-08-07 11:49:30 +02003260 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003261 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
3262 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
3263 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01003264
Alexander Graf3d6368e2008-11-25 20:17:07 +01003265 /* In case we don't even reach vcpu_run, the fields are not updated */
3266 svm->vmcb->save.rax = nested_vmcb->save.rax;
3267 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3268 svm->vmcb->save.rip = nested_vmcb->save.rip;
3269 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3270 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3271 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3272
Joerg Roedelf7138532010-03-01 15:34:40 +01003273 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01003274 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003275
Joerg Roedelaad42c62009-08-07 11:49:34 +02003276 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003277 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01003278 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02003279 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3280 svm->nested.intercept = nested_vmcb->control.intercept;
3281
Joerg Roedelf40f6a42010-12-03 15:25:15 +01003282 svm_flush_tlb(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003283 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003284 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3285 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3286 else
3287 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3288
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003289 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3290 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003291 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3292 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003293 }
3294
Joerg Roedel0d945bd2010-05-05 16:04:45 +02003295 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003296 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02003297
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003298 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003299 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3300 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3301 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003302 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3303 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3304
Joerg Roedel7597f122010-02-19 16:23:00 +01003305 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003306
Joerg Roedel20307532010-11-29 17:51:48 +01003307 /* Enter Guest-Mode */
3308 enter_guest_mode(&svm->vcpu);
3309
Joerg Roedel384c6362010-11-30 18:03:56 +01003310 /*
3311 * Merge guest and host intercepts - must be called with vcpu in
3312 * guest-mode to take effect here
3313 */
3314 recalc_intercepts(svm);
3315
Joerg Roedel06fc77722010-02-19 16:23:07 +01003316 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003317
Joerg Roedel2af91942009-08-07 11:49:28 +02003318 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003319
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003320 mark_all_dirty(svm->vmcb);
Ladi Prosekc2634062017-10-11 16:54:44 +02003321}
3322
3323static bool nested_svm_vmrun(struct vcpu_svm *svm)
3324{
3325 struct vmcb *nested_vmcb;
3326 struct vmcb *hsave = svm->nested.hsave;
3327 struct vmcb *vmcb = svm->vmcb;
3328 struct page *page;
3329 u64 vmcb_gpa;
3330
3331 vmcb_gpa = svm->vmcb->save.rax;
3332
3333 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3334 if (!nested_vmcb)
3335 return false;
3336
3337 if (!nested_vmcb_checks(nested_vmcb)) {
3338 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
3339 nested_vmcb->control.exit_code_hi = 0;
3340 nested_vmcb->control.exit_info_1 = 0;
3341 nested_vmcb->control.exit_info_2 = 0;
3342
3343 nested_svm_unmap(page);
3344
3345 return false;
3346 }
3347
3348 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3349 nested_vmcb->save.rip,
3350 nested_vmcb->control.int_ctl,
3351 nested_vmcb->control.event_inj,
3352 nested_vmcb->control.nested_ctl);
3353
3354 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3355 nested_vmcb->control.intercept_cr >> 16,
3356 nested_vmcb->control.intercept_exceptions,
3357 nested_vmcb->control.intercept);
3358
3359 /* Clear internal status */
3360 kvm_clear_exception_queue(&svm->vcpu);
3361 kvm_clear_interrupt_queue(&svm->vcpu);
3362
3363 /*
3364 * Save the old vmcb, so we don't need to pick what we save, but can
3365 * restore everything when a VMEXIT occurs
3366 */
3367 hsave->save.es = vmcb->save.es;
3368 hsave->save.cs = vmcb->save.cs;
3369 hsave->save.ss = vmcb->save.ss;
3370 hsave->save.ds = vmcb->save.ds;
3371 hsave->save.gdtr = vmcb->save.gdtr;
3372 hsave->save.idtr = vmcb->save.idtr;
3373 hsave->save.efer = svm->vcpu.arch.efer;
3374 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
3375 hsave->save.cr4 = svm->vcpu.arch.cr4;
3376 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3377 hsave->save.rip = kvm_rip_read(&svm->vcpu);
3378 hsave->save.rsp = vmcb->save.rsp;
3379 hsave->save.rax = vmcb->save.rax;
3380 if (npt_enabled)
3381 hsave->save.cr3 = vmcb->save.cr3;
3382 else
3383 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
3384
3385 copy_vmcb_control_area(hsave, vmcb);
3386
3387 enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003388
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003389 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003390}
3391
Joerg Roedel9966bf62009-08-07 11:49:40 +02003392static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Alexander Graf55426752008-11-25 20:17:06 +01003393{
3394 to_vmcb->save.fs = from_vmcb->save.fs;
3395 to_vmcb->save.gs = from_vmcb->save.gs;
3396 to_vmcb->save.tr = from_vmcb->save.tr;
3397 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3398 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3399 to_vmcb->save.star = from_vmcb->save.star;
3400 to_vmcb->save.lstar = from_vmcb->save.lstar;
3401 to_vmcb->save.cstar = from_vmcb->save.cstar;
3402 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3403 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3404 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3405 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
Alexander Graf55426752008-11-25 20:17:06 +01003406}
3407
Avi Kivity851ba692009-08-24 11:10:17 +03003408static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003409{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003410 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003411 struct page *page;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003412 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003413
Alexander Graf55426752008-11-25 20:17:06 +01003414 if (nested_svm_check_permissions(svm))
3415 return 1;
3416
Joerg Roedel7597f122010-02-19 16:23:00 +01003417 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003418 if (!nested_vmcb)
3419 return 1;
3420
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003421 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003422 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003423
Joerg Roedel9966bf62009-08-07 11:49:40 +02003424 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003425 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003426
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003427 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01003428}
3429
Avi Kivity851ba692009-08-24 11:10:17 +03003430static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003431{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003432 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003433 struct page *page;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003434 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003435
Alexander Graf55426752008-11-25 20:17:06 +01003436 if (nested_svm_check_permissions(svm))
3437 return 1;
3438
Joerg Roedel7597f122010-02-19 16:23:00 +01003439 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003440 if (!nested_vmcb)
3441 return 1;
3442
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003443 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003444 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003445
Joerg Roedel9966bf62009-08-07 11:49:40 +02003446 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003447 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003448
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003449 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01003450}
3451
Avi Kivity851ba692009-08-24 11:10:17 +03003452static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003453{
Alexander Graf3d6368e2008-11-25 20:17:07 +01003454 if (nested_svm_check_permissions(svm))
3455 return 1;
3456
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02003457 /* Save rip after vmrun instruction */
3458 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003459
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003460 if (!nested_svm_vmrun(svm))
Alexander Graf3d6368e2008-11-25 20:17:07 +01003461 return 1;
3462
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003463 if (!nested_svm_vmrun_msrpm(svm))
Joerg Roedel1f8da472009-08-07 11:49:43 +02003464 goto failed;
3465
3466 return 1;
3467
3468failed:
3469
3470 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
3471 svm->vmcb->control.exit_code_hi = 0;
3472 svm->vmcb->control.exit_info_1 = 0;
3473 svm->vmcb->control.exit_info_2 = 0;
3474
3475 nested_svm_vmexit(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003476
3477 return 1;
3478}
3479
Avi Kivity851ba692009-08-24 11:10:17 +03003480static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003481{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003482 int ret;
3483
Alexander Graf1371d902008-11-25 20:17:04 +01003484 if (nested_svm_check_permissions(svm))
3485 return 1;
3486
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003487 /*
3488 * If VGIF is enabled, the STGI intercept is only added to
Ladi Prosekcc3d9672017-10-17 16:02:39 +02003489 * detect the opening of the SMI/NMI window; remove it now.
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003490 */
3491 if (vgif_enabled(svm))
3492 clr_intercept(svm, INTERCEPT_STGI);
3493
Alexander Graf1371d902008-11-25 20:17:04 +01003494 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003495 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03003496 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003497
Joerg Roedel2af91942009-08-07 11:49:28 +02003498 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003499
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003500 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01003501}
3502
Avi Kivity851ba692009-08-24 11:10:17 +03003503static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003504{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003505 int ret;
3506
Alexander Graf1371d902008-11-25 20:17:04 +01003507 if (nested_svm_check_permissions(svm))
3508 return 1;
3509
3510 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003511 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003512
Joerg Roedel2af91942009-08-07 11:49:28 +02003513 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003514
3515 /* After a CLGI no interrupts should be delivered */
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05003516 if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3517 svm_clear_vintr(svm);
3518 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3519 mark_dirty(svm->vmcb, VMCB_INTR);
3520 }
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003521
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003522 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01003523}
3524
Avi Kivity851ba692009-08-24 11:10:17 +03003525static int invlpga_interception(struct vcpu_svm *svm)
Alexander Grafff092382009-06-15 15:21:24 +02003526{
3527 struct kvm_vcpu *vcpu = &svm->vcpu;
Alexander Grafff092382009-06-15 15:21:24 +02003528
David Kaplan668f1982015-02-20 16:02:10 -06003529 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3530 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedelec1ff792009-10-09 16:08:31 +02003531
Alexander Grafff092382009-06-15 15:21:24 +02003532 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
David Kaplan668f1982015-02-20 16:02:10 -06003533 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Alexander Grafff092382009-06-15 15:21:24 +02003534
3535 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003536 return kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Grafff092382009-06-15 15:21:24 +02003537}
3538
Joerg Roedel532a46b2009-10-09 16:08:32 +02003539static int skinit_interception(struct vcpu_svm *svm)
3540{
David Kaplan668f1982015-02-20 16:02:10 -06003541 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedel532a46b2009-10-09 16:08:32 +02003542
3543 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3544 return 1;
3545}
3546
David Kaplandab429a2015-03-02 13:43:37 -06003547static int wbinvd_interception(struct vcpu_svm *svm)
3548{
Kyle Huey6affcbe2016-11-29 12:40:40 -08003549 return kvm_emulate_wbinvd(&svm->vcpu);
David Kaplandab429a2015-03-02 13:43:37 -06003550}
3551
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003552static int xsetbv_interception(struct vcpu_svm *svm)
3553{
3554 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3555 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3556
3557 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3558 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003559 return kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003560 }
3561
3562 return 1;
3563}
3564
Avi Kivity851ba692009-08-24 11:10:17 +03003565static int task_switch_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003566{
Izik Eidus37817f22008-03-24 23:14:53 +02003567 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003568 int reason;
3569 int int_type = svm->vmcb->control.exit_int_info &
3570 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03003571 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003572 uint32_t type =
3573 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3574 uint32_t idt_v =
3575 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02003576 bool has_error_code = false;
3577 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02003578
3579 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003580
Izik Eidus37817f22008-03-24 23:14:53 +02003581 if (svm->vmcb->control.exit_info_2 &
3582 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003583 reason = TASK_SWITCH_IRET;
3584 else if (svm->vmcb->control.exit_info_2 &
3585 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3586 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003587 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003588 reason = TASK_SWITCH_GATE;
3589 else
3590 reason = TASK_SWITCH_CALL;
3591
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003592 if (reason == TASK_SWITCH_GATE) {
3593 switch (type) {
3594 case SVM_EXITINTINFO_TYPE_NMI:
3595 svm->vcpu.arch.nmi_injected = false;
3596 break;
3597 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02003598 if (svm->vmcb->control.exit_info_2 &
3599 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3600 has_error_code = true;
3601 error_code =
3602 (u32)svm->vmcb->control.exit_info_2;
3603 }
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003604 kvm_clear_exception_queue(&svm->vcpu);
3605 break;
3606 case SVM_EXITINTINFO_TYPE_INTR:
3607 kvm_clear_interrupt_queue(&svm->vcpu);
3608 break;
3609 default:
3610 break;
3611 }
3612 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003613
Gleb Natapov8317c292009-04-12 13:37:02 +03003614 if (reason != TASK_SWITCH_GATE ||
3615 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3616 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Gleb Natapovf629cf82009-05-11 13:35:49 +03003617 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
3618 skip_emulated_instruction(&svm->vcpu);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003619
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01003620 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3621 int_vec = -1;
3622
3623 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
Gleb Natapovacb54512010-04-15 21:03:50 +03003624 has_error_code, error_code) == EMULATE_FAIL) {
3625 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3626 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3627 svm->vcpu.run->internal.ndata = 0;
3628 return 0;
3629 }
3630 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003631}
3632
Avi Kivity851ba692009-08-24 11:10:17 +03003633static int cpuid_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003634{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003635 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Kyle Huey6a908b62016-11-29 12:40:37 -08003636 return kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003637}
3638
Avi Kivity851ba692009-08-24 11:10:17 +03003639static int iret_interception(struct vcpu_svm *svm)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003640{
3641 ++svm->vcpu.stat.nmi_window_exits;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003642 clr_intercept(svm, INTERCEPT_IRET);
Gleb Natapov44c11432009-05-11 13:35:52 +03003643 svm->vcpu.arch.hflags |= HF_IRET_MASK;
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003644 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
Radim Krčmářf303b4c2014-01-17 20:52:42 +01003645 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003646 return 1;
3647}
3648
Avi Kivity851ba692009-08-24 11:10:17 +03003649static int invlpg_interception(struct vcpu_svm *svm)
Marcelo Tosattia7052892008-09-23 13:18:35 -03003650{
Andre Przywaradf4f31082010-12-21 11:12:06 +01003651 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3652 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3653
3654 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003655 return kvm_skip_emulated_instruction(&svm->vcpu);
Marcelo Tosattia7052892008-09-23 13:18:35 -03003656}
3657
Avi Kivity851ba692009-08-24 11:10:17 +03003658static int emulate_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003659{
Andre Przywara51d8b662010-12-21 11:12:02 +01003660 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003661}
3662
Avi Kivity332b56e2011-11-10 14:57:24 +02003663static int rdpmc_interception(struct vcpu_svm *svm)
3664{
3665 int err;
3666
3667 if (!static_cpu_has(X86_FEATURE_NRIPS))
3668 return emulate_on_interception(svm);
3669
3670 err = kvm_rdpmc(&svm->vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08003671 return kvm_complete_insn_gp(&svm->vcpu, err);
Avi Kivity332b56e2011-11-10 14:57:24 +02003672}
3673
Xiubo Li52eb5a62015-03-13 17:39:45 +08003674static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3675 unsigned long val)
Joerg Roedel628afd22011-04-04 12:39:36 +02003676{
3677 unsigned long cr0 = svm->vcpu.arch.cr0;
3678 bool ret = false;
3679 u64 intercept;
3680
3681 intercept = svm->nested.intercept;
3682
3683 if (!is_guest_mode(&svm->vcpu) ||
3684 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3685 return false;
3686
3687 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3688 val &= ~SVM_CR0_SELECTIVE_MASK;
3689
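 /*
  * A note on the masking above, assuming SVM_CR0_SELECTIVE_MASK covers
  * CR0.TS and CR0.MP: the CR0_SEL_WRITE intercept is meant to fire only
  * when some other CR0 bit changes, so TS and MP are masked out and the
  * exit is reflected to L1 only if the remaining bits differ.
  */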
3690 if (cr0 ^ val) {
3691 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3692 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3693 }
3694
3695 return ret;
3696}
3697
Andre Przywara7ff76d52010-12-21 11:12:04 +01003698#define CR_VALID (1ULL << 63)
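/*
 * With decode assists, bit 63 of exit_info_1 indicates that the rest of
 * the field (including the GPR number in SVM_EXITINFO_REG_MASK) is
 * valid; when it is clear, cr_interception() below falls back to full
 * instruction emulation.
 */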
3699
3700static int cr_interception(struct vcpu_svm *svm)
3701{
3702 int reg, cr;
3703 unsigned long val;
3704 int err;
3705
3706 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3707 return emulate_on_interception(svm);
3708
3709 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3710 return emulate_on_interception(svm);
3711
3712 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
David Kaplan5e575182015-03-06 14:44:35 -06003713 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3714 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3715 else
3716 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
Andre Przywara7ff76d52010-12-21 11:12:04 +01003717
3718 err = 0;
3719 if (cr >= 16) { /* mov to cr */
3720 cr -= 16;
3721 val = kvm_register_read(&svm->vcpu, reg);
3722 switch (cr) {
3723 case 0:
Joerg Roedel628afd22011-04-04 12:39:36 +02003724 if (!check_selective_cr0_intercepted(svm, val))
3725 err = kvm_set_cr0(&svm->vcpu, val);
Joerg Roedel977b2d02011-04-18 11:42:52 +02003726 else
3727 return 1;
3728
Andre Przywara7ff76d52010-12-21 11:12:04 +01003729 break;
3730 case 3:
3731 err = kvm_set_cr3(&svm->vcpu, val);
3732 break;
3733 case 4:
3734 err = kvm_set_cr4(&svm->vcpu, val);
3735 break;
3736 case 8:
3737 err = kvm_set_cr8(&svm->vcpu, val);
3738 break;
3739 default:
3740 WARN(1, "unhandled write to CR%d", cr);
3741 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3742 return 1;
3743 }
3744 } else { /* mov from cr */
3745 switch (cr) {
3746 case 0:
3747 val = kvm_read_cr0(&svm->vcpu);
3748 break;
3749 case 2:
3750 val = svm->vcpu.arch.cr2;
3751 break;
3752 case 3:
Avi Kivity9f8fe502010-12-05 17:30:00 +02003753 val = kvm_read_cr3(&svm->vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003754 break;
3755 case 4:
3756 val = kvm_read_cr4(&svm->vcpu);
3757 break;
3758 case 8:
3759 val = kvm_get_cr8(&svm->vcpu);
3760 break;
3761 default:
3762 WARN(1, "unhandled read from CR%d", cr);
3763 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3764 return 1;
3765 }
3766 kvm_register_write(&svm->vcpu, reg, val);
3767 }
Kyle Huey6affcbe2016-11-29 12:40:40 -08003768 return kvm_complete_insn_gp(&svm->vcpu, err);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003769}
3770
Andre Przywaracae37972010-12-21 11:12:05 +01003771static int dr_interception(struct vcpu_svm *svm)
3772{
3773 int reg, dr;
3774 unsigned long val;
Andre Przywaracae37972010-12-21 11:12:05 +01003775
Paolo Bonzinifacb0132014-02-21 10:32:27 +01003776 if (svm->vcpu.guest_debug == 0) {
3777 /*
3778 * No more DR vmexits; force a reload of the debug registers
3779 * and reenter on this instruction. The next vmexit will
3780 * retrieve the full state of the debug registers.
3781 */
3782 clr_dr_intercepts(svm);
3783 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
3784 return 1;
3785 }
3786
Andre Przywaracae37972010-12-21 11:12:05 +01003787 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
3788 return emulate_on_interception(svm);
3789
3790 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3791 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3792
3793 if (dr >= 16) { /* mov to DRn */
Nadav Amit16f8a6f2014-10-03 01:10:05 +03003794 if (!kvm_require_dr(&svm->vcpu, dr - 16))
3795 return 1;
Andre Przywaracae37972010-12-21 11:12:05 +01003796 val = kvm_register_read(&svm->vcpu, reg);
3797 kvm_set_dr(&svm->vcpu, dr - 16, val);
3798 } else {
Nadav Amit16f8a6f2014-10-03 01:10:05 +03003799 if (!kvm_require_dr(&svm->vcpu, dr))
3800 return 1;
3801 kvm_get_dr(&svm->vcpu, dr, &val);
3802 kvm_register_write(&svm->vcpu, reg, val);
Andre Przywaracae37972010-12-21 11:12:05 +01003803 }
3804
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003805 return kvm_skip_emulated_instruction(&svm->vcpu);
Andre Przywaracae37972010-12-21 11:12:05 +01003806}
3807
Avi Kivity851ba692009-08-24 11:10:17 +03003808static int cr8_write_interception(struct vcpu_svm *svm)
Joerg Roedel1d075432007-12-06 21:02:25 +01003809{
Avi Kivity851ba692009-08-24 11:10:17 +03003810 struct kvm_run *kvm_run = svm->vcpu.run;
Andre Przywaraeea1cff2010-12-21 11:12:00 +01003811 int r;
Avi Kivity851ba692009-08-24 11:10:17 +03003812
Gleb Natapov0a5fff192009-04-21 17:45:06 +03003813 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3814 /* instruction emulation calls kvm_set_cr8() */
Andre Przywara7ff76d52010-12-21 11:12:04 +01003815 r = cr_interception(svm);
Paolo Bonzini35754c92015-07-29 12:05:37 +02003816 if (lapic_in_kernel(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01003817 return r;
Gleb Natapov0a5fff192009-04-21 17:45:06 +03003818 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01003819 return r;
Joerg Roedel1d075432007-12-06 21:02:25 +01003820 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3821 return 0;
3822}
3823
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003824static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003825{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003826 struct vcpu_svm *svm = to_svm(vcpu);
3827
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003828 switch (msr_info->index) {
Jaswinder Singh Rajputaf24a4e2009-05-15 18:42:05 +05303829 case MSR_IA32_TSC: {
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003830 msr_info->data = svm->vmcb->control.tsc_offset +
Haozhong Zhang35181e82015-10-20 15:39:03 +08003831 kvm_scale_tsc(vcpu, rdtsc());
Joerg Roedelfbc0db72011-03-25 09:44:46 +01003832
Avi Kivity6aa8b732006-12-10 02:21:36 -08003833 break;
3834 }
Brian Gerst8c065852010-07-17 09:03:26 -04003835 case MSR_STAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003836 msr_info->data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003837 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08003838#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08003839 case MSR_LSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003840 msr_info->data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003841 break;
3842 case MSR_CSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003843 msr_info->data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003844 break;
3845 case MSR_KERNEL_GS_BASE:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003846 msr_info->data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003847 break;
3848 case MSR_SYSCALL_MASK:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003849 msr_info->data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003850 break;
3851#endif
3852 case MSR_IA32_SYSENTER_CS:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003853 msr_info->data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003854 break;
3855 case MSR_IA32_SYSENTER_EIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003856 msr_info->data = svm->sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003857 break;
3858 case MSR_IA32_SYSENTER_ESP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003859 msr_info->data = svm->sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003860 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01003861 case MSR_TSC_AUX:
3862 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3863 return 1;
3864 msr_info->data = svm->tsc_aux;
3865 break;
Joerg Roedele0231712010-02-24 18:59:10 +01003866 /*
3867 * Nobody will change the following 5 values in the VMCB so we can
3868 * safely return them on rdmsr. They will always be 0 until LBRV is
3869 * implemented.
3870 */
Joerg Roedela2938c82008-02-13 16:30:28 +01003871 case MSR_IA32_DEBUGCTLMSR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003872 msr_info->data = svm->vmcb->save.dbgctl;
Joerg Roedela2938c82008-02-13 16:30:28 +01003873 break;
3874 case MSR_IA32_LASTBRANCHFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003875 msr_info->data = svm->vmcb->save.br_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01003876 break;
3877 case MSR_IA32_LASTBRANCHTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003878 msr_info->data = svm->vmcb->save.br_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01003879 break;
3880 case MSR_IA32_LASTINTFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003881 msr_info->data = svm->vmcb->save.last_excp_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01003882 break;
3883 case MSR_IA32_LASTINTTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003884 msr_info->data = svm->vmcb->save.last_excp_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01003885 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003886 case MSR_VM_HSAVE_PA:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003887 msr_info->data = svm->nested.hsave_msr;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003888 break;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01003889 case MSR_VM_CR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003890 msr_info->data = svm->nested.vm_cr_msr;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01003891 break;
Alexander Grafc8a73f12009-01-05 16:02:47 +01003892 case MSR_IA32_UCODE_REV:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003893 msr_info->data = 0x01000065;
Alexander Grafc8a73f12009-01-05 16:02:47 +01003894 break;
Borislav Petkovae8b7872015-11-23 11:12:23 +01003895 case MSR_F15H_IC_CFG: {
3896
3897 int family, model;
3898
3899 family = guest_cpuid_family(vcpu);
3900 model = guest_cpuid_model(vcpu);
3901
3902 if (family < 0 || model < 0)
3903 return kvm_get_msr_common(vcpu, msr_info);
3904
3905 msr_info->data = 0;
3906
3907 if (family == 0x15 &&
3908 (model >= 0x2 && model < 0x20))
3909 msr_info->data = 0x1E;
3910 }
3911 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003912 default:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003913 return kvm_get_msr_common(vcpu, msr_info);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003914 }
3915 return 0;
3916}
3917
Avi Kivity851ba692009-08-24 11:10:17 +03003918static int rdmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003919{
David Kaplan668f1982015-02-20 16:02:10 -06003920 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003921 struct msr_data msr_info;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003922
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003923 msr_info.index = ecx;
3924 msr_info.host_initiated = false;
3925 if (svm_get_msr(&svm->vcpu, &msr_info)) {
Avi Kivity59200272010-01-25 19:47:02 +02003926 trace_kvm_msr_read_ex(ecx);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003927 kvm_inject_gp(&svm->vcpu, 0);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003928 return 1;
Avi Kivity59200272010-01-25 19:47:02 +02003929 } else {
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003930 trace_kvm_msr_read(ecx, msr_info.data);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003931
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003932 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3933 msr_info.data & 0xffffffff);
3934 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3935 msr_info.data >> 32);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003936 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003937 return kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003938 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08003939}
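
/*
 * Minimal sketch of the EDX:EAX split performed in rdmsr_interception()
 * above; the helper name is illustrative and not part of this file. The
 * low 32 bits of the MSR value land in RAX, the high 32 bits in RDX.
 */
static inline void demo_split_msr_value(u64 data, u32 *eax, u32 *edx)
{
	*eax = data & 0xffffffff;	/* low half -> guest RAX */
	*edx = data >> 32;		/* high half -> guest RDX */
}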
3940
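/*
 * Handle a guest write to MSR_VM_CR. Once SVMDIS has been set, both
 * SVM_LOCK and SVMDIS are masked out of chg_mask and become read-only,
 * and an attempt to set SVMDIS while EFER.SVME is still set fails
 * (non-zero return), which the WRMSR path turns into a #GP.
 */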
Joerg Roedel4a810182010-02-24 18:59:15 +01003941static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3942{
3943 struct vcpu_svm *svm = to_svm(vcpu);
3944 int svm_dis, chg_mask;
3945
3946 if (data & ~SVM_VM_CR_VALID_MASK)
3947 return 1;
3948
3949 chg_mask = SVM_VM_CR_VALID_MASK;
3950
3951 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3952 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3953
3954 svm->nested.vm_cr_msr &= ~chg_mask;
3955 svm->nested.vm_cr_msr |= (data & chg_mask);
3956
3957 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3958
3959 /* check for svm_disable while efer.svme is set */
3960 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3961 return 1;
3962
3963 return 0;
3964}
3965
Will Auld8fe8ab42012-11-29 12:42:12 -08003966static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003967{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003968 struct vcpu_svm *svm = to_svm(vcpu);
3969
Will Auld8fe8ab42012-11-29 12:42:12 -08003970 u32 ecx = msr->index;
3971 u64 data = msr->data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003972 switch (ecx) {
Paolo Bonzini15038e12017-10-26 09:13:27 +02003973 case MSR_IA32_CR_PAT:
3974 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3975 return 1;
3976 vcpu->arch.pat = data;
3977 svm->vmcb->save.g_pat = data;
3978 mark_dirty(svm->vmcb, VMCB_NPT);
3979 break;
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10003980 case MSR_IA32_TSC:
Will Auld8fe8ab42012-11-29 12:42:12 -08003981 kvm_write_tsc(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003982 break;
Brian Gerst8c065852010-07-17 09:03:26 -04003983 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003984 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003985 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08003986#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08003987 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003988 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003989 break;
3990 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003991 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003992 break;
3993 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003994 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003995 break;
3996 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003997 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003998 break;
3999#endif
4000 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004001 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004002 break;
4003 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02004004 svm->sysenter_eip = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004005 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004006 break;
4007 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02004008 svm->sysenter_esp = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004009 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004010 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01004011 case MSR_TSC_AUX:
4012 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4013 return 1;
4014
4015 /*
4016 * This is rare, so we update the MSR here instead of using
4017 * direct_access_msrs. Doing that would require a rdmsr in
4018 * svm_vcpu_put.
4019 */
4020 svm->tsc_aux = data;
4021 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
4022 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01004023 case MSR_IA32_DEBUGCTLMSR:
Avi Kivity2a6b20b2010-11-09 16:15:42 +02004024 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
Christoffer Dalla737f252012-06-03 21:17:48 +03004025 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
4026 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01004027 break;
4028 }
4029 if (data & DEBUGCTL_RESERVED_BITS)
4030 return 1;
4031
4032 svm->vmcb->save.dbgctl = data;
Joerg Roedelb53ba3f2010-12-03 11:45:59 +01004033 mark_dirty(svm->vmcb, VMCB_LBR);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01004034 if (data & (1ULL<<0))
4035 svm_enable_lbrv(svm);
4036 else
4037 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01004038 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01004039 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02004040 svm->nested.hsave_msr = data;
Alexander Grafb286d5d2008-11-25 20:17:05 +01004041 break;
Alexander Graf3c5d0a42009-06-15 15:21:23 +02004042 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01004043 return svm_set_vm_cr(vcpu, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02004044 case MSR_VM_IGNNE:
Christoffer Dalla737f252012-06-03 21:17:48 +03004045 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02004046 break;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004047 case MSR_IA32_APICBASE:
4048 if (kvm_vcpu_apicv_active(vcpu))
4049 avic_update_vapic_bar(to_svm(vcpu), data);
4050		/* Fall through */
Avi Kivity6aa8b732006-12-10 02:21:36 -08004051 default:
Will Auld8fe8ab42012-11-29 12:42:12 -08004052 return kvm_set_msr_common(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004053 }
4054 return 0;
4055}
4056
Avi Kivity851ba692009-08-24 11:10:17 +03004057static int wrmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004058{
Will Auld8fe8ab42012-11-29 12:42:12 -08004059 struct msr_data msr;
David Kaplan668f1982015-02-20 16:02:10 -06004060 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
4061 u64 data = kvm_read_edx_eax(&svm->vcpu);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004062
Will Auld8fe8ab42012-11-29 12:42:12 -08004063 msr.data = data;
4064 msr.index = ecx;
4065 msr.host_initiated = false;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004066
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004067 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Nadav Amit854e8bb2014-09-16 03:24:05 +03004068 if (kvm_set_msr(&svm->vcpu, &msr)) {
Avi Kivity59200272010-01-25 19:47:02 +02004069 trace_kvm_msr_write_ex(ecx, data);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02004070 kvm_inject_gp(&svm->vcpu, 0);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004071 return 1;
Avi Kivity59200272010-01-25 19:47:02 +02004072 } else {
4073 trace_kvm_msr_write(ecx, data);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004074 return kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity59200272010-01-25 19:47:02 +02004075 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08004076}
4077
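/*
 * Common handler for the SVM_EXIT_MSR intercept: exit_info_1 is 1 for
 * WRMSR and 0 for RDMSR, which is all the dispatch below depends on.
 */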
Avi Kivity851ba692009-08-24 11:10:17 +03004078static int msr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004079{
Rusty Russelle756fc62007-07-30 20:07:08 +10004080 if (svm->vmcb->control.exit_info_1)
Avi Kivity851ba692009-08-24 11:10:17 +03004081 return wrmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004082 else
Avi Kivity851ba692009-08-24 11:10:17 +03004083 return rdmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004084}
4085
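/*
 * The VINTR intercept fires once the guest can accept an interrupt.
 * Drop the virtual interrupt request and ask the generic event path
 * (KVM_REQ_EVENT) to inject the real one.
 */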
Avi Kivity851ba692009-08-24 11:10:17 +03004086static int interrupt_window_interception(struct vcpu_svm *svm)
Dor Laorc1150d82007-01-05 16:36:24 -08004087{
Avi Kivity3842d132010-07-27 12:30:24 +03004088 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graff0b85052008-11-25 20:17:01 +01004089 svm_clear_vintr(svm);
Eddie Dong85f455f2007-07-06 12:20:49 +03004090 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Joerg Roedeldecdbf62010-12-03 11:45:52 +01004091 mark_dirty(svm->vmcb, VMCB_INTR);
Jason Wang675acb72012-03-08 18:07:56 +08004092 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08004093 return 1;
4094}
4095
Mark Langsdorf565d0992009-10-06 14:25:02 -05004096static int pause_interception(struct vcpu_svm *svm)
4097{
Longpeng(Mike)de63ad42017-08-08 12:05:33 +08004098 struct kvm_vcpu *vcpu = &svm->vcpu;
4099 bool in_kernel = (svm_get_cpl(vcpu) == 0);
4100
4101 kvm_vcpu_on_spin(vcpu, in_kernel);
Mark Langsdorf565d0992009-10-06 14:25:02 -05004102 return 1;
4103}
4104
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004105static int nop_interception(struct vcpu_svm *svm)
4106{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004107 return kvm_skip_emulated_instruction(&(svm->vcpu));
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004108}
4109
4110static int monitor_interception(struct vcpu_svm *svm)
4111{
4112 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
4113 return nop_interception(svm);
4114}
4115
4116static int mwait_interception(struct vcpu_svm *svm)
4117{
4118 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
4119 return nop_interception(svm);
4120}
4121
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004122enum avic_ipi_failure_cause {
4123 AVIC_IPI_FAILURE_INVALID_INT_TYPE,
4124 AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
4125 AVIC_IPI_FAILURE_INVALID_TARGET,
4126 AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
4127};
4128
4129static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4130{
4131 u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
4132 u32 icrl = svm->vmcb->control.exit_info_1;
4133 u32 id = svm->vmcb->control.exit_info_2 >> 32;
Dan Carpenter5446a972016-05-23 13:20:10 +03004134 u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004135 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4136
4137 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
4138
4139 switch (id) {
4140 case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
4141 /*
4142 * AVIC hardware handles the generation of
4143 * IPIs when the specified Message Type is Fixed
4144 * (also known as fixed delivery mode) and
4145 * the Trigger Mode is edge-triggered. The hardware
4146 * also supports self and broadcast delivery modes
4147	 * specified via the Destination Shorthand (DSH)
4148 * field of the ICRL. Logical and physical APIC ID
4149 * formats are supported. All other IPI types cause
4150	 * a #VMEXIT, which needs to be emulated.
4151 */
4152 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4153 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4154 break;
4155 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4156 int i;
4157 struct kvm_vcpu *vcpu;
4158 struct kvm *kvm = svm->vcpu.kvm;
4159 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4160
4161 /*
4162 * At this point, we expect that the AVIC HW has already
4163 * set the appropriate IRR bits on the valid target
4164 * vcpus. So, we just need to kick the appropriate vcpu.
4165 */
4166 kvm_for_each_vcpu(i, vcpu, kvm) {
4167 bool m = kvm_apic_match_dest(vcpu, apic,
4168 icrl & KVM_APIC_SHORT_MASK,
4169 GET_APIC_DEST_FIELD(icrh),
4170 icrl & KVM_APIC_DEST_MASK);
4171
4172 if (m && !avic_vcpu_is_running(vcpu))
4173 kvm_vcpu_wake_up(vcpu);
4174 }
4175 break;
4176 }
4177 case AVIC_IPI_FAILURE_INVALID_TARGET:
4178 break;
4179 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4180 WARN_ONCE(1, "Invalid backing page\n");
4181 break;
4182 default:
4183 pr_err("Unknown IPI interception\n");
4184 }
4185
4186 return 1;
4187}
4188
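/*
 * Map a logical APIC ID to its slot in the AVIC logical ID table. In
 * flat mode the destination is a one-hot bit in bits 0-7 of the DLID,
 * so e.g. a DLID of 0x20 yields index 5; in cluster mode the high
 * nibble selects the cluster and the low nibble carries a one-hot APIC
 * bit, giving index = (cluster << 2) + apic.
 */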
4189static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
4190{
4191 struct kvm_arch *vm_data = &vcpu->kvm->arch;
4192 int index;
4193 u32 *logical_apic_id_table;
4194 int dlid = GET_APIC_LOGICAL_ID(ldr);
4195
4196 if (!dlid)
4197 return NULL;
4198
4199 if (flat) { /* flat */
4200 index = ffs(dlid) - 1;
4201 if (index > 7)
4202 return NULL;
4203 } else { /* cluster */
4204 int cluster = (dlid & 0xf0) >> 4;
4205 int apic = ffs(dlid & 0x0f) - 1;
4206
4207 if ((apic < 0) || (apic > 7) ||
4208 (cluster >= 0xf))
4209 return NULL;
4210 index = (cluster << 2) + apic;
4211 }
4212
4213 logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
4214
4215 return &logical_apic_id_table[index];
4216}
4217
4218static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
4219 bool valid)
4220{
4221 bool flat;
4222 u32 *entry, new_entry;
4223
4224 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
4225 entry = avic_get_logical_id_entry(vcpu, ldr, flat);
4226 if (!entry)
4227 return -EINVAL;
4228
4229 new_entry = READ_ONCE(*entry);
4230 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
4231 new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
4232 if (valid)
4233 new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4234 else
4235 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4236 WRITE_ONCE(*entry, new_entry);
4237
4238 return 0;
4239}
4240
4241static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
4242{
4243 int ret;
4244 struct vcpu_svm *svm = to_svm(vcpu);
4245 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
4246
4247 if (!ldr)
4248 return 1;
4249
4250 ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
4251 if (ret && svm->ldr_reg) {
4252 avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
4253 svm->ldr_reg = 0;
4254 } else {
4255 svm->ldr_reg = ldr;
4256 }
4257 return ret;
4258}
4259
4260static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
4261{
4262 u64 *old, *new;
4263 struct vcpu_svm *svm = to_svm(vcpu);
4264 u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
4265 u32 id = (apic_id_reg >> 24) & 0xff;
4266
4267 if (vcpu->vcpu_id == id)
4268 return 0;
4269
4270 old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
4271 new = avic_get_physical_id_entry(vcpu, id);
4272 if (!new || !old)
4273 return 1;
4274
4275	/* We need to move the physical_id_entry to the new offset */
4276 *new = *old;
4277 *old = 0ULL;
4278 to_svm(vcpu)->avic_physical_id_cache = new;
4279
4280 /*
4281 * Also update the guest physical APIC ID in the logical
4282	 * APIC ID table entry if the LDR has already been set up.
4283 */
4284 if (svm->ldr_reg)
4285 avic_handle_ldr_update(vcpu);
4286
4287 return 0;
4288}
4289
4290static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
4291{
4292 struct vcpu_svm *svm = to_svm(vcpu);
4293 struct kvm_arch *vm_data = &vcpu->kvm->arch;
4294 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
4295 u32 mod = (dfr >> 28) & 0xf;
4296
4297 /*
4298 * We assume that all local APICs are using the same type.
4299 * If this changes, we need to flush the AVIC logical
4300	 * APIC ID table.
4301 */
4302 if (vm_data->ldr_mode == mod)
4303 return 0;
4304
4305 clear_page(page_address(vm_data->avic_logical_id_table_page));
4306 vm_data->ldr_mode = mod;
4307
4308 if (svm->ldr_reg)
4309 avic_handle_ldr_update(vcpu);
4310 return 0;
4311}
4312
4313static int avic_unaccel_trap_write(struct vcpu_svm *svm)
4314{
4315 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4316 u32 offset = svm->vmcb->control.exit_info_1 &
4317 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4318
4319 switch (offset) {
4320 case APIC_ID:
4321 if (avic_handle_apic_id_update(&svm->vcpu))
4322 return 0;
4323 break;
4324 case APIC_LDR:
4325 if (avic_handle_ldr_update(&svm->vcpu))
4326 return 0;
4327 break;
4328 case APIC_DFR:
4329 avic_handle_dfr_update(&svm->vcpu);
4330 break;
4331 default:
4332 break;
4333 }
4334
4335 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
4336
4337 return 1;
4338}
4339
4340static bool is_avic_unaccelerated_access_trap(u32 offset)
4341{
4342 bool ret = false;
4343
4344 switch (offset) {
4345 case APIC_ID:
4346 case APIC_EOI:
4347 case APIC_RRR:
4348 case APIC_LDR:
4349 case APIC_DFR:
4350 case APIC_SPIV:
4351 case APIC_ESR:
4352 case APIC_ICR:
4353 case APIC_LVTT:
4354 case APIC_LVTTHMR:
4355 case APIC_LVTPC:
4356 case APIC_LVT0:
4357 case APIC_LVT1:
4358 case APIC_LVTERR:
4359 case APIC_TMICT:
4360 case APIC_TDCR:
4361 ret = true;
4362 break;
4363 default:
4364 break;
4365 }
4366 return ret;
4367}
4368
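/*
 * AVIC reports an unaccelerated APIC access either as a trap, where
 * the write has already completed and only needs to be propagated to
 * the local APIC state, or as a fault, where the instruction still has
 * to be emulated.
 */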
4369static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4370{
4371 int ret = 0;
4372 u32 offset = svm->vmcb->control.exit_info_1 &
4373 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4374 u32 vector = svm->vmcb->control.exit_info_2 &
4375 AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4376 bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4377 AVIC_UNACCEL_ACCESS_WRITE_MASK;
4378 bool trap = is_avic_unaccelerated_access_trap(offset);
4379
4380 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4381 trap, write, vector);
4382 if (trap) {
4383 /* Handling Trap */
4384 WARN_ONCE(!write, "svm: Handling trap read.\n");
4385 ret = avic_unaccel_trap_write(svm);
4386 } else {
4387 /* Handling Fault */
4388 ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4389 }
4390
4391 return ret;
4392}
4393
Mathias Krause09941fb2012-08-30 01:30:20 +02004394static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
Andre Przywara7ff76d52010-12-21 11:12:04 +01004395 [SVM_EXIT_READ_CR0] = cr_interception,
4396 [SVM_EXIT_READ_CR3] = cr_interception,
4397 [SVM_EXIT_READ_CR4] = cr_interception,
4398 [SVM_EXIT_READ_CR8] = cr_interception,
David Kaplan5e575182015-03-06 14:44:35 -06004399 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
Joerg Roedel628afd22011-04-04 12:39:36 +02004400 [SVM_EXIT_WRITE_CR0] = cr_interception,
Andre Przywara7ff76d52010-12-21 11:12:04 +01004401 [SVM_EXIT_WRITE_CR3] = cr_interception,
4402 [SVM_EXIT_WRITE_CR4] = cr_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004403 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Andre Przywaracae37972010-12-21 11:12:05 +01004404 [SVM_EXIT_READ_DR0] = dr_interception,
4405 [SVM_EXIT_READ_DR1] = dr_interception,
4406 [SVM_EXIT_READ_DR2] = dr_interception,
4407 [SVM_EXIT_READ_DR3] = dr_interception,
4408 [SVM_EXIT_READ_DR4] = dr_interception,
4409 [SVM_EXIT_READ_DR5] = dr_interception,
4410 [SVM_EXIT_READ_DR6] = dr_interception,
4411 [SVM_EXIT_READ_DR7] = dr_interception,
4412 [SVM_EXIT_WRITE_DR0] = dr_interception,
4413 [SVM_EXIT_WRITE_DR1] = dr_interception,
4414 [SVM_EXIT_WRITE_DR2] = dr_interception,
4415 [SVM_EXIT_WRITE_DR3] = dr_interception,
4416 [SVM_EXIT_WRITE_DR4] = dr_interception,
4417 [SVM_EXIT_WRITE_DR5] = dr_interception,
4418 [SVM_EXIT_WRITE_DR6] = dr_interception,
4419 [SVM_EXIT_WRITE_DR7] = dr_interception,
Jan Kiszkad0bfb942008-12-15 13:52:10 +01004420 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
4421 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05004422 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004423 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004424 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Eric Northup54a20552015-11-03 18:03:53 +01004425 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004426 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02004427 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004428 [SVM_EXIT_SMI] = nop_on_interception,
4429 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08004430 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity332b56e2011-11-10 14:57:24 +02004431 [SVM_EXIT_RDPMC] = rdpmc_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004432 [SVM_EXIT_CPUID] = cpuid_interception,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004433 [SVM_EXIT_IRET] = iret_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02004434 [SVM_EXIT_INVD] = emulate_on_interception,
Mark Langsdorf565d0992009-10-06 14:25:02 -05004435 [SVM_EXIT_PAUSE] = pause_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004436 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03004437 [SVM_EXIT_INVLPG] = invlpg_interception,
Alexander Grafff092382009-06-15 15:21:24 +02004438 [SVM_EXIT_INVLPGA] = invlpga_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004439 [SVM_EXIT_IOIO] = io_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004440 [SVM_EXIT_MSR] = msr_interception,
4441 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08004442 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Alexander Graf3d6368e2008-11-25 20:17:07 +01004443 [SVM_EXIT_VMRUN] = vmrun_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02004444 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Alexander Graf55426752008-11-25 20:17:06 +01004445 [SVM_EXIT_VMLOAD] = vmload_interception,
4446 [SVM_EXIT_VMSAVE] = vmsave_interception,
Alexander Graf1371d902008-11-25 20:17:04 +01004447 [SVM_EXIT_STGI] = stgi_interception,
4448 [SVM_EXIT_CLGI] = clgi_interception,
Joerg Roedel532a46b2009-10-09 16:08:32 +02004449 [SVM_EXIT_SKINIT] = skinit_interception,
David Kaplandab429a2015-03-02 13:43:37 -06004450 [SVM_EXIT_WBINVD] = wbinvd_interception,
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004451 [SVM_EXIT_MONITOR] = monitor_interception,
4452 [SVM_EXIT_MWAIT] = mwait_interception,
Joerg Roedel81dd35d2010-12-07 17:15:06 +01004453 [SVM_EXIT_XSETBV] = xsetbv_interception,
Paolo Bonzinid0006532017-08-11 18:36:43 +02004454 [SVM_EXIT_NPF] = npf_interception,
Paolo Bonzini64d60672015-05-07 11:36:11 +02004455 [SVM_EXIT_RSM] = emulate_on_interception,
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004456 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
4457 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004458};
4459
Joe Perchesae8cc052011-04-24 22:00:50 -07004460static void dump_vmcb(struct kvm_vcpu *vcpu)
Joerg Roedel3f10c842010-05-05 16:04:42 +02004461{
4462 struct vcpu_svm *svm = to_svm(vcpu);
4463 struct vmcb_control_area *control = &svm->vmcb->control;
4464 struct vmcb_save_area *save = &svm->vmcb->save;
4465
4466 pr_err("VMCB Control Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07004467 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4468 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4469 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4470 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4471 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4472 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4473 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4474 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4475 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4476 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4477 pr_err("%-20s%d\n", "asid:", control->asid);
4478 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4479 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4480 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4481 pr_err("%-20s%08x\n", "int_state:", control->int_state);
4482 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4483 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4484 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4485 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4486 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4487 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4488 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004489 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
Joe Perchesae8cc052011-04-24 22:00:50 -07004490 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4491 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05004492 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
Joe Perchesae8cc052011-04-24 22:00:50 -07004493 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004494 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4495 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4496 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004497 pr_err("VMCB State Save Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07004498 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4499 "es:",
4500 save->es.selector, save->es.attrib,
4501 save->es.limit, save->es.base);
4502 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4503 "cs:",
4504 save->cs.selector, save->cs.attrib,
4505 save->cs.limit, save->cs.base);
4506 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4507 "ss:",
4508 save->ss.selector, save->ss.attrib,
4509 save->ss.limit, save->ss.base);
4510 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4511 "ds:",
4512 save->ds.selector, save->ds.attrib,
4513 save->ds.limit, save->ds.base);
4514 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4515 "fs:",
4516 save->fs.selector, save->fs.attrib,
4517 save->fs.limit, save->fs.base);
4518 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4519 "gs:",
4520 save->gs.selector, save->gs.attrib,
4521 save->gs.limit, save->gs.base);
4522 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4523 "gdtr:",
4524 save->gdtr.selector, save->gdtr.attrib,
4525 save->gdtr.limit, save->gdtr.base);
4526 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4527 "ldtr:",
4528 save->ldtr.selector, save->ldtr.attrib,
4529 save->ldtr.limit, save->ldtr.base);
4530 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4531 "idtr:",
4532 save->idtr.selector, save->idtr.attrib,
4533 save->idtr.limit, save->idtr.base);
4534 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4535 "tr:",
4536 save->tr.selector, save->tr.attrib,
4537 save->tr.limit, save->tr.base);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004538 pr_err("cpl: %d efer: %016llx\n",
4539 save->cpl, save->efer);
Joe Perchesae8cc052011-04-24 22:00:50 -07004540 pr_err("%-15s %016llx %-13s %016llx\n",
4541 "cr0:", save->cr0, "cr2:", save->cr2);
4542 pr_err("%-15s %016llx %-13s %016llx\n",
4543 "cr3:", save->cr3, "cr4:", save->cr4);
4544 pr_err("%-15s %016llx %-13s %016llx\n",
4545 "dr6:", save->dr6, "dr7:", save->dr7);
4546 pr_err("%-15s %016llx %-13s %016llx\n",
4547 "rip:", save->rip, "rflags:", save->rflags);
4548 pr_err("%-15s %016llx %-13s %016llx\n",
4549 "rsp:", save->rsp, "rax:", save->rax);
4550 pr_err("%-15s %016llx %-13s %016llx\n",
4551 "star:", save->star, "lstar:", save->lstar);
4552 pr_err("%-15s %016llx %-13s %016llx\n",
4553 "cstar:", save->cstar, "sfmask:", save->sfmask);
4554 pr_err("%-15s %016llx %-13s %016llx\n",
4555 "kernel_gs_base:", save->kernel_gs_base,
4556 "sysenter_cs:", save->sysenter_cs);
4557 pr_err("%-15s %016llx %-13s %016llx\n",
4558 "sysenter_esp:", save->sysenter_esp,
4559 "sysenter_eip:", save->sysenter_eip);
4560 pr_err("%-15s %016llx %-13s %016llx\n",
4561 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4562 pr_err("%-15s %016llx %-13s %016llx\n",
4563 "br_from:", save->br_from, "br_to:", save->br_to);
4564 pr_err("%-15s %016llx %-13s %016llx\n",
4565 "excp_from:", save->last_excp_from,
4566 "excp_to:", save->last_excp_to);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004567}
4568
Avi Kivity586f9602010-11-18 13:09:54 +02004569static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4570{
4571 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4572
4573 *info1 = control->exit_info_1;
4574 *info2 = control->exit_info_2;
4575}
4576
Avi Kivity851ba692009-08-24 11:10:17 +03004577static int handle_exit(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004578{
Avi Kivity04d2cc72007-09-10 18:10:54 +03004579 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03004580 struct kvm_run *kvm_run = vcpu->run;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004581 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004582
Paolo Bonzini8b89fe12015-12-10 18:37:32 +01004583 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4584
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004585 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
Joerg Roedel2be4fc72010-04-22 12:33:09 +02004586 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4587 if (npt_enabled)
4588 vcpu->arch.cr3 = svm->vmcb->save.cr3;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004589
Joerg Roedelcd3ff652009-10-09 16:08:26 +02004590 if (unlikely(svm->nested.exit_required)) {
4591 nested_svm_vmexit(svm);
4592 svm->nested.exit_required = false;
4593
4594 return 1;
4595 }
4596
Joerg Roedel20307532010-11-29 17:51:48 +01004597 if (is_guest_mode(vcpu)) {
Joerg Roedel410e4d52009-08-07 11:49:44 +02004598 int vmexit;
4599
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02004600 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4601 svm->vmcb->control.exit_info_1,
4602 svm->vmcb->control.exit_info_2,
4603 svm->vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01004604 svm->vmcb->control.exit_int_info_err,
4605 KVM_ISA_SVM);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02004606
Joerg Roedel410e4d52009-08-07 11:49:44 +02004607 vmexit = nested_svm_exit_special(svm);
4608
4609 if (vmexit == NESTED_EXIT_CONTINUE)
4610 vmexit = nested_svm_exit_handled(svm);
4611
4612 if (vmexit == NESTED_EXIT_DONE)
Alexander Grafcf74a782008-11-25 20:17:08 +01004613 return 1;
Alexander Grafcf74a782008-11-25 20:17:08 +01004614 }
4615
Joerg Roedela5c38322009-08-07 11:49:32 +02004616 svm_complete_interrupts(svm);
4617
Avi Kivity04d2cc72007-09-10 18:10:54 +03004618 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4619 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4620 kvm_run->fail_entry.hardware_entry_failure_reason
4621 = svm->vmcb->control.exit_code;
Joerg Roedel3f10c842010-05-05 16:04:42 +02004622 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
4623 dump_vmcb(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03004624 return 0;
4625 }
4626
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004627 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01004628 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
Joerg Roedel55c5e462010-09-10 17:31:04 +02004629 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4630 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
Borislav Petkov6614c7d2013-04-26 00:22:01 +02004631 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
Avi Kivity6aa8b732006-12-10 02:21:36 -08004632 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08004633 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004634 exit_code);
4635
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02004636 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08004637 || !svm_exit_handlers[exit_code]) {
Bandan Dasfaac2452015-03-16 17:18:25 -04004638 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
Michael S. Tsirkin2bc19dc2014-09-18 16:21:16 +03004639 kvm_queue_exception(vcpu, UD_VECTOR);
4640 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004641 }
4642
Avi Kivity851ba692009-08-24 11:10:17 +03004643 return svm_exit_handlers[exit_code](svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004644}
4645
4646static void reload_tss(struct kvm_vcpu *vcpu)
4647{
4648 int cpu = raw_smp_processor_id();
4649
Tejun Heo0fe1e002009-10-29 22:34:14 +09004650 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4651 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08004652 load_TR_desc();
4653}
4654
Brijesh Singh70cd94e2017-12-04 10:57:34 -06004655static void pre_sev_run(struct vcpu_svm *svm, int cpu)
4656{
4657 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4658 int asid = sev_get_asid(svm->vcpu.kvm);
4659
4660 /* Assign the asid allocated with this SEV guest */
4661 svm->vmcb->control.asid = asid;
4662
4663 /*
4664 * Flush guest TLB:
4665 *
4666	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
4667	 * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
4668 */
4669 if (sd->sev_vmcbs[asid] == svm->vmcb &&
4670 svm->last_cpu == cpu)
4671 return;
4672
4673 svm->last_cpu = cpu;
4674 sd->sev_vmcbs[asid] = svm->vmcb;
4675 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
4676 mark_dirty(svm->vmcb, VMCB_ASID);
4677}
4678
Rusty Russelle756fc62007-07-30 20:07:08 +10004679static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004680{
4681 int cpu = raw_smp_processor_id();
4682
Tejun Heo0fe1e002009-10-29 22:34:14 +09004683 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004684
Brijesh Singh70cd94e2017-12-04 10:57:34 -06004685 if (sev_guest(svm->vcpu.kvm))
4686 return pre_sev_run(svm, cpu);
4687
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03004688 /* FIXME: handle wraparound of asid_generation */
Tejun Heo0fe1e002009-10-29 22:34:14 +09004689 if (svm->asid_generation != sd->asid_generation)
4690 new_asid(svm, sd);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004691}
4692
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004693static void svm_inject_nmi(struct kvm_vcpu *vcpu)
4694{
4695 struct vcpu_svm *svm = to_svm(vcpu);
4696
4697 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
4698 vcpu->arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01004699 set_intercept(svm, INTERCEPT_IRET);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004700 ++vcpu->stat.nmi_injections;
4701}
Avi Kivity6aa8b732006-12-10 02:21:36 -08004702
Eddie Dong85f455f2007-07-06 12:20:49 +03004703static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004704{
4705 struct vmcb_control_area *control;
4706
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004707 /* The following fields are ignored when AVIC is enabled */
Rusty Russelle756fc62007-07-30 20:07:08 +10004708 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03004709 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004710 control->int_ctl &= ~V_INTR_PRIO_MASK;
4711 control->int_ctl |= V_IRQ_MASK |
4712 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
Joerg Roedeldecdbf62010-12-03 11:45:52 +01004713 mark_dirty(svm->vmcb, VMCB_INTR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004714}
4715
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004716static void svm_set_irq(struct kvm_vcpu *vcpu)
Eddie Dong2a8067f2007-08-06 16:29:07 +03004717{
4718 struct vcpu_svm *svm = to_svm(vcpu);
4719
Joerg Roedel2af91942009-08-07 11:49:28 +02004720 BUG_ON(!(gif_set(svm)));
Alexander Grafcf74a782008-11-25 20:17:08 +01004721
Gleb Natapov9fb2d2b2010-05-23 14:28:26 +03004722 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
4723 ++vcpu->stat.irq_injections;
4724
Alexander Graf219b65d2009-06-15 15:21:25 +02004725 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
4726 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
Eddie Dong2a8067f2007-08-06 16:29:07 +03004727}
4728
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004729static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
4730{
4731 return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
4732}
4733
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004734static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
4735{
4736 struct vcpu_svm *svm = to_svm(vcpu);
4737
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004738 if (svm_nested_virtualize_tpr(vcpu) ||
4739 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01004740 return;
4741
Radim Krčmář596f3142014-03-11 19:11:18 +01004742 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
4743
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004744 if (irr == -1)
4745 return;
4746
4747 if (tpr >= irr)
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004748 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004749}
4750
Yang Zhang8d146952013-01-25 10:18:50 +08004751static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
4752{
4753 return;
4754}
4755
Suravee Suthikulpanitb2a05fe2017-09-12 10:42:41 -05004756static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
Yang Zhangc7c9c562013-01-25 10:18:51 +08004757{
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05004758 return avic && irqchip_split(vcpu->kvm);
Yang Zhangc7c9c562013-01-25 10:18:51 +08004759}
4760
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004761static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
4762{
4763}
4764
Paolo Bonzini67c9ddd2016-05-10 17:01:23 +02004765static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004766{
4767}
4768
4769/* Note: Currently only used by Hyper-V. */
Andrey Smetanind62caab2015-11-10 15:36:33 +03004770static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4771{
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004772 struct vcpu_svm *svm = to_svm(vcpu);
4773 struct vmcb *vmcb = svm->vmcb;
4774
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05004775 if (!kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004776 return;
4777
4778 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
4779 mark_dirty(vmcb, VMCB_INTR);
Yang Zhangc7c9c562013-01-25 10:18:51 +08004780}
4781
Andrey Smetanin63086302015-11-10 15:36:32 +03004782static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
Yang Zhangc7c9c562013-01-25 10:18:51 +08004783{
4784 return;
4785}
4786
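/*
 * Deliver an interrupt via AVIC: set the vector in the IRR and, if the
 * target vCPU is currently running, ring its doorbell so the hardware
 * injects the interrupt directly; otherwise wake the vCPU so it picks
 * the interrupt up on its next entry.
 */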
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004787static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
4788{
4789 kvm_lapic_set_irr(vec, vcpu->arch.apic);
4790 smp_mb__after_atomic();
4791
4792 if (avic_vcpu_is_running(vcpu))
4793 wrmsrl(SVM_AVIC_DOORBELL,
Suravee Suthikulpanit7d669f52016-06-15 17:23:45 -05004794 kvm_cpu_get_apicid(vcpu->cpu));
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004795 else
4796 kvm_vcpu_wake_up(vcpu);
4797}
4798
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004799static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4800{
4801 unsigned long flags;
4802 struct amd_svm_iommu_ir *cur;
4803
4804 spin_lock_irqsave(&svm->ir_list_lock, flags);
4805 list_for_each_entry(cur, &svm->ir_list, node) {
4806 if (cur->data != pi->ir_data)
4807 continue;
4808 list_del(&cur->node);
4809 kfree(cur);
4810 break;
4811 }
4812 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4813}
4814
4815static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4816{
4817 int ret = 0;
4818 unsigned long flags;
4819 struct amd_svm_iommu_ir *ir;
4820
4821 /**
4822	 * In some cases, the existing irte is updated and re-set,
4823	 * so we need to check here if it's already been added
4824 * to the ir_list.
4825 */
4826 if (pi->ir_data && (pi->prev_ga_tag != 0)) {
4827 struct kvm *kvm = svm->vcpu.kvm;
4828 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
4829 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
4830 struct vcpu_svm *prev_svm;
4831
4832 if (!prev_vcpu) {
4833 ret = -EINVAL;
4834 goto out;
4835 }
4836
4837 prev_svm = to_svm(prev_vcpu);
4838 svm_ir_list_del(prev_svm, pi);
4839 }
4840
4841 /**
4842	 * Allocate a new amd_iommu_pi_data entry, which will be
4843	 * added to the per-vcpu ir_list.
4844 */
4845 ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
4846 if (!ir) {
4847 ret = -ENOMEM;
4848 goto out;
4849 }
4850 ir->data = pi->ir_data;
4851
4852 spin_lock_irqsave(&svm->ir_list_lock, flags);
4853 list_add(&ir->node, &svm->ir_list);
4854 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4855out:
4856 return ret;
4857}
4858
4859/**
4860 * Note:
4861 * The HW cannot support posting multicast/broadcast
4862 * interrupts to a vCPU. So, we still use legacy interrupt
4863 * remapping for these kinds of interrupts.
4864 *
4865 * For lowest-priority interrupts, we only support
4866 * those with a single CPU as the destination, e.g. the user
4867 * configures the interrupts via /proc/irq or uses
4868 * irqbalance to make the interrupts single-CPU.
4869 */
4870static int
4871get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
4872 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
4873{
4874 struct kvm_lapic_irq irq;
4875 struct kvm_vcpu *vcpu = NULL;
4876
4877 kvm_set_msi_irq(kvm, e, &irq);
4878
4879 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
4880 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
4881 __func__, irq.vector);
4882 return -1;
4883 }
4884
4885 pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
4886 irq.vector);
4887 *svm = to_svm(vcpu);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05004888 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004889 vcpu_info->vector = irq.vector;
4890
4891 return 0;
4892}
4893
4894/*
4895 * svm_update_pi_irte - set IRTE for Posted-Interrupts
4896 *
4897 * @kvm: kvm
4898 * @host_irq: host irq of the interrupt
4899 * @guest_irq: gsi of the interrupt
4900 * @set: set or unset PI
4901 * returns 0 on success, < 0 on failure
4902 */
4903static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
4904 uint32_t guest_irq, bool set)
4905{
4906 struct kvm_kernel_irq_routing_entry *e;
4907 struct kvm_irq_routing_table *irq_rt;
4908 int idx, ret = -EINVAL;
4909
4910 if (!kvm_arch_has_assigned_device(kvm) ||
4911 !irq_remapping_cap(IRQ_POSTING_CAP))
4912 return 0;
4913
4914 pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
4915 __func__, host_irq, guest_irq, set);
4916
4917 idx = srcu_read_lock(&kvm->irq_srcu);
4918 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
4919 WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
4920
4921 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
4922 struct vcpu_data vcpu_info;
4923 struct vcpu_svm *svm = NULL;
4924
4925 if (e->type != KVM_IRQ_ROUTING_MSI)
4926 continue;
4927
4928 /**
4929		 * Here, we set up the IRTE in legacy mode in the following cases:
4930		 * 1. When the interrupt cannot be targeted at a specific vcpu.
4931		 * 2. When unsetting a posted interrupt.
4932		 * 3. When APIC virtualization is disabled for the vcpu.
4933 */
4934 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
4935 kvm_vcpu_apicv_active(&svm->vcpu)) {
4936 struct amd_iommu_pi_data pi;
4937
4938 /* Try to enable guest_mode in IRTE */
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05004939 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
4940 AVIC_HPA_MASK);
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004941 pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
4942 svm->vcpu.vcpu_id);
4943 pi.is_guest_mode = true;
4944 pi.vcpu_data = &vcpu_info;
4945 ret = irq_set_vcpu_affinity(host_irq, &pi);
4946
4947 /**
4948			 * Here we have successfully set up vcpu affinity in
4949			 * IOMMU guest mode. Now we need to store the posted
4950			 * interrupt information in a per-vcpu ir_list so that
4951			 * we can reference it directly when we update the vcpu
4952			 * scheduling information in the IOMMU irte.
4953 */
4954 if (!ret && pi.is_guest_mode)
4955 svm_ir_list_add(svm, &pi);
4956 } else {
4957 /* Use legacy mode in IRTE */
4958 struct amd_iommu_pi_data pi;
4959
4960 /**
4961 * Here, pi is used to:
4962 * - Tell IOMMU to use legacy mode for this interrupt.
4963 * - Retrieve ga_tag of prior interrupt remapping data.
4964 */
4965 pi.is_guest_mode = false;
4966 ret = irq_set_vcpu_affinity(host_irq, &pi);
4967
4968 /**
4969 * Check if the posted interrupt was previously
4970			 * set up in guest_mode by checking if the ga_tag
4971 * was cached. If so, we need to clean up the per-vcpu
4972 * ir_list.
4973 */
4974 if (!ret && pi.prev_ga_tag) {
4975 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
4976 struct kvm_vcpu *vcpu;
4977
4978 vcpu = kvm_get_vcpu_by_id(kvm, id);
4979 if (vcpu)
4980 svm_ir_list_del(to_svm(vcpu), &pi);
4981 }
4982 }
4983
4984 if (!ret && svm) {
4985 trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
4986 host_irq, e->gsi,
4987 vcpu_info.vector,
4988 vcpu_info.pi_desc_addr, set);
4989 }
4990
4991 if (ret < 0) {
4992 pr_err("%s: failed to update PI IRTE\n", __func__);
4993 goto out;
4994 }
4995 }
4996
4997 ret = 0;
4998out:
4999 srcu_read_unlock(&kvm->irq_srcu, idx);
5000 return ret;
5001}
5002
Gleb Natapov95ba8273132009-04-21 17:45:08 +03005003static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02005004{
5005 struct vcpu_svm *svm = to_svm(vcpu);
5006 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel924584c2010-04-22 12:33:07 +02005007 int ret;
5008 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
5009 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
5010 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
5011
5012 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02005013}
5014
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005015static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
5016{
5017 struct vcpu_svm *svm = to_svm(vcpu);
5018
5019 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
5020}
5021
5022static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5023{
5024 struct vcpu_svm *svm = to_svm(vcpu);
5025
5026 if (masked) {
5027 svm->vcpu.arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01005028 set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005029 } else {
5030 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01005031 clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005032 }
5033}
5034
Gleb Natapov78646122009-03-23 12:12:11 +02005035static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
5036{
5037 struct vcpu_svm *svm = to_svm(vcpu);
5038 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005039 int ret;
5040
5041 if (!gif_set(svm) ||
5042 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
5043 return 0;
5044
Avi Kivityf6e78472010-08-02 15:30:20 +03005045 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005046
Joerg Roedel20307532010-11-29 17:51:48 +01005047 if (is_guest_mode(vcpu))
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005048 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5049
5050 return ret;
Gleb Natapov78646122009-03-23 12:12:11 +02005051}
5052
Jan Kiszkac9a79532014-03-07 20:03:15 +01005053static void enable_irq_window(struct kvm_vcpu *vcpu)
Gleb Natapov9222be12009-04-23 17:14:37 +03005054{
Alexander Graf219b65d2009-06-15 15:21:25 +02005055 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02005056
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05005057 if (kvm_vcpu_apicv_active(vcpu))
5058 return;
5059
Joerg Roedele0231712010-02-24 18:59:10 +01005060 /*
5061 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5062 * 1, because that's a separate STGI/VMRUN intercept. The next time we
5063 * get that intercept, this function will be called again though and
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005064 * we'll get the vintr intercept. However, if the vGIF feature is
5065 * enabled, the STGI interception will not occur. Enable the irq
5066 * window under the assumption that the hardware will set the GIF.
Joerg Roedele0231712010-02-24 18:59:10 +01005067 */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005068 if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
Alexander Graf219b65d2009-06-15 15:21:25 +02005069 svm_set_vintr(svm);
5070 svm_inject_irq(svm, 0x0);
5071 }
Gleb Natapov9222be12009-04-23 17:14:37 +03005072}
5073
Jan Kiszkac9a79532014-03-07 20:03:15 +01005074static void enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005075{
Avi Kivity04d2cc72007-09-10 18:10:54 +03005076 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03005077
Gleb Natapov44c11432009-05-11 13:35:52 +03005078 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5079 == HF_NMI_MASK)
Jan Kiszkac9a79532014-03-07 20:03:15 +01005080 return; /* IRET will cause a vm exit */
Gleb Natapov44c11432009-05-11 13:35:52 +03005081
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005082 if (!gif_set(svm)) {
5083 if (vgif_enabled(svm))
5084 set_intercept(svm, INTERCEPT_STGI);
Ladi Prosek1a5e1852017-06-21 09:07:01 +02005085 return; /* STGI will cause a vm exit */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005086 }
Ladi Prosek1a5e1852017-06-21 09:07:01 +02005087
5088 if (svm->nested.exit_required)
5089 return; /* we're not going to run the guest yet */
5090
Joerg Roedele0231712010-02-24 18:59:10 +01005091 /*
5092	 * Something prevents the NMI from being injected. Single step over the
5093	 * possible problem (IRET or exception injection or interrupt shadow)
5094 */
Ladi Prosekab2f4d732017-06-21 09:06:58 +02005095 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
Jan Kiszka6be7d302009-10-18 13:24:54 +02005096 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03005097 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
Eddie Dong85f455f2007-07-06 12:20:49 +03005098}
5099
Izik Eiduscbc94022007-10-25 00:29:55 +02005100static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5101{
5102 return 0;
5103}
5104
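/*
 * With FLUSHBYASID the hardware can flush just this guest's ASID;
 * without it, decrementing asid_generation forces a fresh ASID (and
 * thus a full TLB flush) on the next VMRUN.
 */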
Avi Kivityd9e368d2007-06-07 19:18:30 +03005105static void svm_flush_tlb(struct kvm_vcpu *vcpu)
5106{
Joerg Roedel38e5e922010-12-03 15:25:16 +01005107 struct vcpu_svm *svm = to_svm(vcpu);
5108
5109 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5110 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5111 else
5112 svm->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03005113}
5114
Avi Kivity04d2cc72007-09-10 18:10:54 +03005115static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5116{
5117}
5118
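/*
 * V_TPR in int_ctl mirrors the four-bit task priority held in CR8. The
 * two helpers below keep the in-kernel LAPIC and the VMCB copy in sync
 * around each guest entry, unless an L1 hypervisor owns the virtual
 * interrupt state (nested vintr).
 */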
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005119static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5120{
5121 struct vcpu_svm *svm = to_svm(vcpu);
5122
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05005123 if (svm_nested_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01005124 return;
5125
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01005126 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005127 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03005128 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005129 }
5130}
5131
Joerg Roedel649d6862008-04-16 16:51:15 +02005132static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5133{
5134 struct vcpu_svm *svm = to_svm(vcpu);
5135 u64 cr8;
5136
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05005137 if (svm_nested_virtualize_tpr(vcpu) ||
5138 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01005139 return;
5140
Joerg Roedel649d6862008-04-16 16:51:15 +02005141 cr8 = kvm_get_cr8(vcpu);
5142 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5143 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5144}
5145
Gleb Natapov9222be12009-04-23 17:14:37 +03005146static void svm_complete_interrupts(struct vcpu_svm *svm)
5147{
5148 u8 vector;
5149 int type;
5150 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01005151 unsigned int3_injected = svm->int3_injected;
5152
5153 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03005154
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02005155 /*
5156 * If we've made progress since setting HF_IRET_MASK, we've
5157 * executed an IRET and can allow NMI injection.
5158 */
5159 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5160 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
Gleb Natapov44c11432009-05-11 13:35:52 +03005161 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
Avi Kivity3842d132010-07-27 12:30:24 +03005162 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5163 }
Gleb Natapov44c11432009-05-11 13:35:52 +03005164
Gleb Natapov9222be12009-04-23 17:14:37 +03005165 svm->vcpu.arch.nmi_injected = false;
5166 kvm_clear_exception_queue(&svm->vcpu);
5167 kvm_clear_interrupt_queue(&svm->vcpu);
5168
5169 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5170 return;
5171
Avi Kivity3842d132010-07-27 12:30:24 +03005172 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5173
Gleb Natapov9222be12009-04-23 17:14:37 +03005174 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5175 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5176
5177 switch (type) {
5178 case SVM_EXITINTINFO_TYPE_NMI:
5179 svm->vcpu.arch.nmi_injected = true;
5180 break;
5181 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01005182 /*
5183 * In case of software exceptions, do not reinject the vector,
5184 * but re-execute the instruction instead. Rewind RIP first
5185 * if we emulated INT3 before.
5186 */
5187 if (kvm_exception_is_soft(vector)) {
5188 if (vector == BP_VECTOR && int3_injected &&
5189 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5190 kvm_rip_write(&svm->vcpu,
5191 kvm_rip_read(&svm->vcpu) -
5192 int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02005193 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01005194 }
Gleb Natapov9222be12009-04-23 17:14:37 +03005195 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5196 u32 err = svm->vmcb->control.exit_int_info_err;
Joerg Roedelce7ddec2010-04-22 12:33:13 +02005197 kvm_requeue_exception_e(&svm->vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03005198
5199 } else
Joerg Roedelce7ddec2010-04-22 12:33:13 +02005200 kvm_requeue_exception(&svm->vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03005201 break;
5202 case SVM_EXITINTINFO_TYPE_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03005203 kvm_queue_interrupt(&svm->vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03005204 break;
5205 default:
5206 break;
5207 }
5208}
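
/*
 * Worked example (illustrative; field layout per the masks used above:
 * vector in bits 7:0, type in bits 10:8, valid in bit 31): an
 * exit_int_info of 0x80000202 decodes as valid, type NMI, vector 2,
 * so svm_complete_interrupts() simply re-arms nmi_injected for the
 * next entry.
 */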
5209
Avi Kivityb463a6f2010-07-20 15:06:17 +03005210static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5211{
5212 struct vcpu_svm *svm = to_svm(vcpu);
5213 struct vmcb_control_area *control = &svm->vmcb->control;
5214
5215 control->exit_int_info = control->event_inj;
5216 control->exit_int_info_err = control->event_inj_err;
5217 control->event_inj = 0;
5218 svm_complete_interrupts(svm);
5219}
5220
Avi Kivity851ba692009-08-24 11:10:17 +03005221static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005222{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005223 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03005224
Joerg Roedel2041a062010-04-22 12:33:08 +02005225 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5226 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5227 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5228
Joerg Roedelcd3ff652009-10-09 16:08:26 +02005229 /*
5230 * A vmexit emulation is required before the vcpu can be executed
5231 * again.
5232 */
5233 if (unlikely(svm->nested.exit_required))
5234 return;
5235
Ladi Proseka12713c2017-06-21 09:07:00 +02005236 /*
5237 * Disable singlestep if we're injecting an interrupt/exception.
5238 * We don't want our modified rflags to be pushed on the stack where
5239 * we might not be able to easily reset them if we disabled NMI
5240 * singlestep later.
5241 */
5242 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5243 /*
5244 * Event injection happens before external interrupts cause a
5245 * vmexit and interrupts are disabled here, so smp_send_reschedule
5246 * is enough to force an immediate vmexit.
5247 */
5248 disable_nmi_singlestep(svm);
5249 smp_send_reschedule(vcpu->cpu);
5250 }
5251
Rusty Russelle756fc62007-07-30 20:07:08 +10005252 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005253
Joerg Roedel649d6862008-04-16 16:51:15 +02005254 sync_lapic_to_cr8(vcpu);
5255
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02005256 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005257
Avi Kivity04d2cc72007-09-10 18:10:54 +03005258 clgi();
5259
5260 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08005261
Avi Kivity6aa8b732006-12-10 02:21:36 -08005262 asm volatile (
Avi Kivity74547662012-09-16 15:10:59 +03005263 "push %%" _ASM_BP "; \n\t"
5264 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5265 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5266 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5267 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5268 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5269 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005270#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005271 "mov %c[r8](%[svm]), %%r8 \n\t"
5272 "mov %c[r9](%[svm]), %%r9 \n\t"
5273 "mov %c[r10](%[svm]), %%r10 \n\t"
5274 "mov %c[r11](%[svm]), %%r11 \n\t"
5275 "mov %c[r12](%[svm]), %%r12 \n\t"
5276 "mov %c[r13](%[svm]), %%r13 \n\t"
5277 "mov %c[r14](%[svm]), %%r14 \n\t"
5278 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005279#endif
5280
Avi Kivity6aa8b732006-12-10 02:21:36 -08005281 /* Enter guest mode */
Avi Kivity74547662012-09-16 15:10:59 +03005282 "push %%" _ASM_AX " \n\t"
5283 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03005284 __ex(SVM_VMLOAD) "\n\t"
5285 __ex(SVM_VMRUN) "\n\t"
5286 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity74547662012-09-16 15:10:59 +03005287 "pop %%" _ASM_AX " \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005288
5289 /* Save guest registers, load host registers */
Avi Kivity74547662012-09-16 15:10:59 +03005290 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5291 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5292 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5293 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5294 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5295 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005296#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005297 "mov %%r8, %c[r8](%[svm]) \n\t"
5298 "mov %%r9, %c[r9](%[svm]) \n\t"
5299 "mov %%r10, %c[r10](%[svm]) \n\t"
5300 "mov %%r11, %c[r11](%[svm]) \n\t"
5301 "mov %%r12, %c[r12](%[svm]) \n\t"
5302 "mov %%r13, %c[r13](%[svm]) \n\t"
5303 "mov %%r14, %c[r14](%[svm]) \n\t"
5304 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005305#endif
Avi Kivity74547662012-09-16 15:10:59 +03005306 "pop %%" _ASM_BP
Avi Kivity6aa8b732006-12-10 02:21:36 -08005307 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005308 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08005309 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005310 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5311 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5312 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5313 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5314 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5315 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005316#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005317 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5318 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5319 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5320 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5321 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5322 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5323 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5324 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08005325#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02005326 : "cc", "memory"
5327#ifdef CONFIG_X86_64
Avi Kivity74547662012-09-16 15:10:59 +03005328 , "rbx", "rcx", "rdx", "rsi", "rdi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02005329 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
Avi Kivity74547662012-09-16 15:10:59 +03005330#else
5331 , "ebx", "ecx", "edx", "esi", "edi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02005332#endif
5333 );
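	/*
	 * Descriptive note (added; not in the original source): with rax
	 * pointing at the VMCB, VMLOAD above restores the guest's extra
	 * state (FS/GS/TR/LDTR and the syscall/sysenter MSRs), VMRUN
	 * enters the guest and returns here on #VMEXIT, and VMSAVE
	 * writes that same extra state back into the VMCB.
	 */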
Avi Kivity6aa8b732006-12-10 02:21:36 -08005334
Avi Kivity82ca2d12010-10-21 12:20:34 +02005335#ifdef CONFIG_X86_64
5336 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5337#else
Avi Kivitydacccfd2010-10-21 12:20:33 +02005338 loadsegment(fs, svm->host.fs);
Avi Kivity831ca602011-03-08 16:09:51 +02005339#ifndef CONFIG_X86_32_LAZY_GS
5340 loadsegment(gs, svm->host.gs);
5341#endif
Avi Kivity9581d442010-10-19 16:46:55 +02005342#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08005343
5344 reload_tss(vcpu);
5345
Avi Kivity56ba47d2007-11-07 17:14:18 +02005346 local_irq_disable();
5347
Avi Kivity13c34e02010-10-21 12:20:31 +02005348 vcpu->arch.cr2 = svm->vmcb->save.cr2;
5349 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5350 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5351 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5352
Joerg Roedel3781c012011-01-14 16:45:02 +01005353 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5354 kvm_before_handle_nmi(&svm->vcpu);
5355
5356 stgi();
5357
5358 /* Any pending NMI will happen here */
5359
5360 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5361 kvm_after_handle_nmi(&svm->vcpu);
5362
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005363 sync_cr8_to_lapic(vcpu);
5364
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005365 svm->next_rip = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03005366
Joerg Roedel38e5e922010-12-03 15:25:16 +01005367 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5368
Gleb Natapov631bc482010-10-14 11:22:52 +02005369 /* if exit due to PF check for async PF */
5370 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
Wanpeng Li1261bfa2017-07-13 18:30:40 -07005371 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
Gleb Natapov631bc482010-10-14 11:22:52 +02005372
Avi Kivity6de4f3a2009-05-31 22:58:47 +03005373 if (npt_enabled) {
5374 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5375 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5376 }
Joerg Roedelfe5913e2010-05-17 14:43:34 +02005377
5378 /*
5379 * We need to handle MC intercepts here before the vcpu has a chance to
5380 * change the physical cpu
5381 */
5382 if (unlikely(svm->vmcb->control.exit_code ==
5383 SVM_EXIT_EXCP_BASE + MC_VECTOR))
5384 svm_handle_mce(svm);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01005385
5386 mark_all_clean(svm->vmcb);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005387}
Josh Poimboeufc207aee2017-06-28 10:11:06 -05005388STACK_FRAME_NON_STANDARD(svm_vcpu_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005389
Avi Kivity6aa8b732006-12-10 02:21:36 -08005390static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5391{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005392 struct vcpu_svm *svm = to_svm(vcpu);
5393
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05005394 svm->vmcb->save.cr3 = __sme_set(root);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01005395 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01005396 svm_flush_tlb(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005397}
5398
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005399static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5400{
5401 struct vcpu_svm *svm = to_svm(vcpu);
5402
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05005403 svm->vmcb->control.nested_cr3 = __sme_set(root);
Joerg Roedelb2747162010-12-03 11:45:53 +01005404 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005405
5406 /* Also sync guest cr3 here in case we live migrate */
Avi Kivity9f8fe502010-12-05 17:30:00 +02005407 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01005408 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005409
Joerg Roedelf40f6a42010-12-03 15:25:15 +01005410 svm_flush_tlb(vcpu);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005411}
5412
Avi Kivity6aa8b732006-12-10 02:21:36 -08005413static int is_disabled(void)
5414{
Joerg Roedel6031a612007-06-22 12:29:50 +03005415 u64 vm_cr;
5416
5417 rdmsrl(MSR_VM_CR, vm_cr);
5418 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5419 return 1;
5420
Avi Kivity6aa8b732006-12-10 02:21:36 -08005421 return 0;
5422}
5423
Ingo Molnar102d8322007-02-19 14:37:47 +02005424static void
5425svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5426{
5427 /*
5428 * Patch in the VMMCALL instruction:
5429 */
5430 hypercall[0] = 0x0f;
5431 hypercall[1] = 0x01;
5432 hypercall[2] = 0xd9;
Ingo Molnar102d8322007-02-19 14:37:47 +02005433}
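
/*
 * Side note (added; not in the original source): 0f 01 d9 is the
 * encoding of VMMCALL. The Intel/VMX backend patches in 0f 01 c1
 * (VMCALL) instead, which is why KVM rewrites the hypercall site
 * per vendor.
 */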
5434
Yang, Sheng002c7f72007-07-31 14:23:01 +03005435static void svm_check_processor_compat(void *rtn)
5436{
5437 *(int *)rtn = 0;
5438}
5439
Avi Kivity774ead32007-12-26 13:57:04 +02005440static bool svm_cpu_has_accelerated_tpr(void)
5441{
5442 return false;
5443}
5444
Paolo Bonzini6d396b52015-04-01 14:25:33 +02005445static bool svm_has_high_real_mode_segbase(void)
5446{
5447 return true;
5448}
5449
Paolo Bonzinifc07e762015-10-01 13:20:22 +02005450static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5451{
5452 return 0;
5453}
5454
Sheng Yang0e851882009-12-18 16:48:46 +08005455static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5456{
Joerg Roedel6092d3d2015-10-14 15:10:54 +02005457 struct vcpu_svm *svm = to_svm(vcpu);
5458
5459 /* Update nrips enabled cache */
Radim Krčmářd6321d42017-08-05 00:12:49 +02005460 svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05005461
5462 if (!kvm_vcpu_apicv_active(vcpu))
5463 return;
5464
Radim Krčmář1b4d56b2017-08-05 00:12:50 +02005465 guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
Sheng Yang0e851882009-12-18 16:48:46 +08005466}
5467
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005468static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5469{
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005470 switch (func) {
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05005471 case 0x1:
5472 if (avic)
5473 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5474 break;
Joerg Roedel4c62a2d2010-09-10 17:31:06 +02005475 case 0x80000001:
5476 if (nested)
5477 entry->ecx |= (1 << 2); /* Set SVM bit */
5478 break;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005479 case 0x8000000A:
5480 entry->eax = 1; /* SVM revision 1 */
 5481		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
 5482				   ASID emulation to nested SVM */
5483 entry->ecx = 0; /* Reserved */
Joerg Roedel7a190662010-07-27 18:14:21 +02005484		entry->edx = 0; /* By default, do not support any
 5485				   additional features */
5486
5487 /* Support next_rip if host supports it */
Avi Kivity2a6b20b2010-11-09 16:15:42 +02005488 if (boot_cpu_has(X86_FEATURE_NRIPS))
Joerg Roedel7a190662010-07-27 18:14:21 +02005489 entry->edx |= SVM_FEATURE_NRIP;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005490
Joerg Roedel3d4aeaa2010-09-10 17:31:05 +02005491 /* Support NPT for the guest if enabled */
5492 if (npt_enabled)
5493 entry->edx |= SVM_FEATURE_NPT;
5494
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005495 break;
Brijesh Singh8765d752017-12-04 10:57:25 -06005496 case 0x8000001F:
5497 /* Support memory encryption cpuid if host supports it */
5498 if (boot_cpu_has(X86_FEATURE_SEV))
5499 cpuid(0x8000001f, &entry->eax, &entry->ebx,
5500 &entry->ecx, &entry->edx);
5501
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005502 }
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005503}
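
/*
 * Worked example (illustrative; assumes the SVM_FEATURE_* bit layout
 * defined earlier in this file, with NPT at bit 0 and NRIP at bit 3):
 * on a host with npt_enabled and next_rip support, leaf 0x8000000A is
 * reported as eax = 1, ebx = 8, ecx = 0 and
 * edx = SVM_FEATURE_NPT | SVM_FEATURE_NRIP = 0x9.
 */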
5504
Sheng Yang17cc3932010-01-05 19:02:27 +08005505static int svm_get_lpage_level(void)
Joerg Roedel344f4142009-07-27 16:30:48 +02005506{
Sheng Yang17cc3932010-01-05 19:02:27 +08005507 return PT_PDPE_LEVEL;
Joerg Roedel344f4142009-07-27 16:30:48 +02005508}
5509
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005510static bool svm_rdtscp_supported(void)
5511{
Paolo Bonzini46896c72015-11-12 14:49:16 +01005512 return boot_cpu_has(X86_FEATURE_RDTSCP);
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005513}
5514
Mao, Junjiead756a12012-07-02 01:18:48 +00005515static bool svm_invpcid_supported(void)
5516{
5517 return false;
5518}
5519
Paolo Bonzini93c4adc2014-03-05 23:19:52 +01005520static bool svm_mpx_supported(void)
5521{
5522 return false;
5523}
5524
Wanpeng Li55412b22014-12-02 19:21:30 +08005525static bool svm_xsaves_supported(void)
5526{
5527 return false;
5528}
5529
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005530static bool svm_has_wbinvd_exit(void)
5531{
5532 return true;
5533}
5534
Joerg Roedel80612522011-04-04 12:39:33 +02005535#define PRE_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005536 .stage = X86_ICPT_PRE_EXCEPT, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005537#define POST_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005538 .stage = X86_ICPT_POST_EXCEPT, }
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005539#define POST_MEM(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005540 .stage = X86_ICPT_POST_MEMACCESS, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005541
Mathias Krause09941fb2012-08-30 01:30:20 +02005542static const struct __x86_intercept {
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005543 u32 exit_code;
5544 enum x86_intercept_stage stage;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005545} x86_intercept_map[] = {
5546 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
5547 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
5548 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
5549 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
5550 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
Joerg Roedel3b88e412011-04-04 12:39:29 +02005551 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
5552 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02005553 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
5554 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
5555 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
5556 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
5557 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
5558 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
5559 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
5560 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
Joerg Roedel01de8b02011-04-04 12:39:31 +02005561 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
5562 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
5563 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
5564 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
5565 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
5566 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
5567 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
5568 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005569 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
5570 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
5571 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
Joerg Roedel80612522011-04-04 12:39:33 +02005572 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
5573 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
5574 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
5575 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
5576 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
5577 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
5578 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
5579 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
5580 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
Joerg Roedelbf608f82011-04-04 12:39:34 +02005581 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
5582 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
5583 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
5584 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
5585 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
5586 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
5587 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
Joerg Roedelf6511932011-04-04 12:39:35 +02005588 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
5589 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
5590 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
5591 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005592};
5593
Joerg Roedel80612522011-04-04 12:39:33 +02005594#undef PRE_EX
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005595#undef POST_EX
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005596#undef POST_MEM
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005597
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005598static int svm_check_intercept(struct kvm_vcpu *vcpu,
5599 struct x86_instruction_info *info,
5600 enum x86_intercept_stage stage)
5601{
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005602 struct vcpu_svm *svm = to_svm(vcpu);
5603 int vmexit, ret = X86EMUL_CONTINUE;
5604 struct __x86_intercept icpt_info;
5605 struct vmcb *vmcb = svm->vmcb;
5606
5607 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
5608 goto out;
5609
5610 icpt_info = x86_intercept_map[info->intercept];
5611
Avi Kivity40e19b52011-04-21 12:35:41 +03005612 if (stage != icpt_info.stage)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005613 goto out;
5614
5615 switch (icpt_info.exit_code) {
5616 case SVM_EXIT_READ_CR0:
5617 if (info->intercept == x86_intercept_cr_read)
5618 icpt_info.exit_code += info->modrm_reg;
5619 break;
5620 case SVM_EXIT_WRITE_CR0: {
5621 unsigned long cr0, val;
5622 u64 intercept;
5623
5624 if (info->intercept == x86_intercept_cr_write)
5625 icpt_info.exit_code += info->modrm_reg;
5626
Jan Kiszka62baf442014-06-29 21:55:53 +02005627 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
5628 info->intercept == x86_intercept_clts)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005629 break;
5630
5631 intercept = svm->nested.intercept;
5632
5633 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
5634 break;
5635
5636 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
5637 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
5638
5639 if (info->intercept == x86_intercept_lmsw) {
5640 cr0 &= 0xfUL;
5641 val &= 0xfUL;
5642 /* lmsw can't clear PE - catch this here */
5643 if (cr0 & X86_CR0_PE)
5644 val |= X86_CR0_PE;
5645 }
5646
5647 if (cr0 ^ val)
5648 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
5649
5650 break;
5651 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02005652 case SVM_EXIT_READ_DR0:
5653 case SVM_EXIT_WRITE_DR0:
5654 icpt_info.exit_code += info->modrm_reg;
5655 break;
Joerg Roedel80612522011-04-04 12:39:33 +02005656 case SVM_EXIT_MSR:
5657 if (info->intercept == x86_intercept_wrmsr)
5658 vmcb->control.exit_info_1 = 1;
5659 else
5660 vmcb->control.exit_info_1 = 0;
5661 break;
Joerg Roedelbf608f82011-04-04 12:39:34 +02005662 case SVM_EXIT_PAUSE:
5663 /*
 5664		 * We get this intercept for NOP as well, but PAUSE is
 5665		 * REP NOP, so check for the REP prefix here.
5666 */
5667 if (info->rep_prefix != REPE_PREFIX)
5668 goto out;
Jan H. Schönherr49a8afc2017-09-05 23:58:44 +02005669 break;
Joerg Roedelf6511932011-04-04 12:39:35 +02005670 case SVM_EXIT_IOIO: {
5671 u64 exit_info;
5672 u32 bytes;
5673
Joerg Roedelf6511932011-04-04 12:39:35 +02005674 if (info->intercept == x86_intercept_in ||
5675 info->intercept == x86_intercept_ins) {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02005676 exit_info = ((info->src_val & 0xffff) << 16) |
5677 SVM_IOIO_TYPE_MASK;
Joerg Roedelf6511932011-04-04 12:39:35 +02005678 bytes = info->dst_bytes;
Jan Kiszka6493f152014-06-30 11:07:05 +02005679 } else {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02005680 exit_info = (info->dst_val & 0xffff) << 16;
Jan Kiszka6493f152014-06-30 11:07:05 +02005681 bytes = info->src_bytes;
Joerg Roedelf6511932011-04-04 12:39:35 +02005682 }
5683
5684 if (info->intercept == x86_intercept_outs ||
5685 info->intercept == x86_intercept_ins)
5686 exit_info |= SVM_IOIO_STR_MASK;
5687
5688 if (info->rep_prefix)
5689 exit_info |= SVM_IOIO_REP_MASK;
5690
5691 bytes = min(bytes, 4u);
5692
5693 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
5694
5695 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
5696
5697 vmcb->control.exit_info_1 = exit_info;
5698 vmcb->control.exit_info_2 = info->next_rip;
5699
5700 break;
5701 }
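	/*
	 * Worked example (illustrative; assumes the SVM_IOIO_* masks from
	 * svm.h: STR at bit 2, REP at bit 3, size at bit 4, address size
	 * at bit 7): "rep outsb" to port 0x3f8 with a 32-bit address size
	 * encodes as
	 * (0x3f8 << 16) | REP | STR | (1 << 4) | (4 << 6) = 0x03f8011c.
	 */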
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005702 default:
5703 break;
5704 }
5705
Bandan Dasf1047652015-06-11 02:05:33 -04005706 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
5707 if (static_cpu_has(X86_FEATURE_NRIPS))
5708 vmcb->control.next_rip = info->next_rip;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005709 vmcb->control.exit_code = icpt_info.exit_code;
5710 vmexit = nested_svm_exit_handled(svm);
5711
5712 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
5713 : X86EMUL_CONTINUE;
5714
5715out:
5716 return ret;
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005717}
5718
Yang Zhanga547c6d2013-04-11 19:25:10 +08005719static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
5720{
5721 local_irq_enable();
Paolo Bonzinif2485b32016-06-15 15:23:11 +02005722 /*
5723 * We must have an instruction with interrupts enabled, so
5724 * the timer interrupt isn't delayed by the interrupt shadow.
5725 */
5726 asm("nop");
5727 local_irq_disable();
Yang Zhanga547c6d2013-04-11 19:25:10 +08005728}
5729
Radim Krčmářae97a3b2014-08-21 18:08:06 +02005730static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
5731{
5732}
5733
Suravee Suthikulpanitbe8ca172016-05-04 14:09:49 -05005734static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
5735{
5736 if (avic_handle_apic_id_update(vcpu) != 0)
5737 return;
5738 if (avic_handle_dfr_update(vcpu) != 0)
5739 return;
5740 avic_handle_ldr_update(vcpu);
5741}
5742
Borislav Petkov74f16902017-03-26 23:51:24 +02005743static void svm_setup_mce(struct kvm_vcpu *vcpu)
5744{
5745 /* [63:9] are reserved. */
5746 vcpu->arch.mcg_cap &= 0x1ff;
5747}
5748
Ladi Prosek72d7b372017-10-11 16:54:41 +02005749static int svm_smi_allowed(struct kvm_vcpu *vcpu)
5750{
Ladi Prosek05cade72017-10-11 16:54:45 +02005751 struct vcpu_svm *svm = to_svm(vcpu);
5752
5753 /* Per APM Vol.2 15.22.2 "Response to SMI" */
5754 if (!gif_set(svm))
5755 return 0;
5756
5757 if (is_guest_mode(&svm->vcpu) &&
5758 svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
5759 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
5760 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
5761 svm->nested.exit_required = true;
5762 return 0;
5763 }
5764
Ladi Prosek72d7b372017-10-11 16:54:41 +02005765 return 1;
5766}
5767
Ladi Prosek0234bf82017-10-11 16:54:40 +02005768static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
5769{
Ladi Prosek05cade72017-10-11 16:54:45 +02005770 struct vcpu_svm *svm = to_svm(vcpu);
5771 int ret;
5772
5773 if (is_guest_mode(vcpu)) {
5774 /* FED8h - SVM Guest */
5775 put_smstate(u64, smstate, 0x7ed8, 1);
5776 /* FEE0h - SVM Guest VMCB Physical Address */
5777 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
5778
5779 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5780 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5781 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5782
5783 ret = nested_svm_vmexit(svm);
5784 if (ret)
5785 return ret;
5786 }
Ladi Prosek0234bf82017-10-11 16:54:40 +02005787 return 0;
5788}
5789
5790static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
5791{
Ladi Prosek05cade72017-10-11 16:54:45 +02005792 struct vcpu_svm *svm = to_svm(vcpu);
5793 struct vmcb *nested_vmcb;
5794 struct page *page;
5795 struct {
5796 u64 guest;
5797 u64 vmcb;
5798 } svm_state_save;
5799 int ret;
5800
5801 ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
5802 sizeof(svm_state_save));
5803 if (ret)
5804 return ret;
5805
5806 if (svm_state_save.guest) {
5807 vcpu->arch.hflags &= ~HF_SMM_MASK;
5808 nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
5809 if (nested_vmcb)
5810 enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
5811 else
5812 ret = 1;
5813 vcpu->arch.hflags |= HF_SMM_MASK;
5814 }
5815 return ret;
Ladi Prosek0234bf82017-10-11 16:54:40 +02005816}
5817
Ladi Prosekcc3d9672017-10-17 16:02:39 +02005818static int enable_smi_window(struct kvm_vcpu *vcpu)
5819{
5820 struct vcpu_svm *svm = to_svm(vcpu);
5821
5822 if (!gif_set(svm)) {
5823 if (vgif_enabled(svm))
5824 set_intercept(svm, INTERCEPT_STGI);
5825 /* STGI will cause a vm exit */
5826 return 1;
5827 }
5828 return 0;
5829}
5830
Brijesh Singh1654efc2017-12-04 10:57:34 -06005831static int sev_asid_new(void)
5832{
5833 int pos;
5834
5835 /*
 5836	 * SEV-enabled guests must use ASIDs in the range min_sev_asid to max_sev_asid.
5837 */
5838 pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
5839 if (pos >= max_sev_asid)
5840 return -EBUSY;
5841
5842 set_bit(pos, sev_asid_bitmap);
5843 return pos + 1;
5844}
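
/*
 * Worked example (illustrative, with hypothetical limits): if
 * min_sev_asid = 16 and max_sev_asid = 127, the first scan starts at
 * bit index 15; finding it clear returns pos = 15 and hands out
 * ASID 16 (pos + 1), since bit N of sev_asid_bitmap tracks ASID N + 1.
 */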
5845
5846static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
5847{
5848 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5849 int asid, ret;
5850
5851 ret = -EBUSY;
5852 asid = sev_asid_new();
5853 if (asid < 0)
5854 return ret;
5855
5856 ret = sev_platform_init(&argp->error);
5857 if (ret)
5858 goto e_free;
5859
5860 sev->active = true;
5861 sev->asid = asid;
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06005862 INIT_LIST_HEAD(&sev->regions_list);
Brijesh Singh1654efc2017-12-04 10:57:34 -06005863
5864 return 0;
5865
5866e_free:
5867 __sev_asid_free(asid);
5868 return ret;
5869}
5870
Brijesh Singh59414c92017-12-04 10:57:35 -06005871static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
5872{
5873 struct sev_data_activate *data;
5874 int asid = sev_get_asid(kvm);
5875 int ret;
5876
5877 wbinvd_on_all_cpus();
5878
5879 ret = sev_guest_df_flush(error);
5880 if (ret)
5881 return ret;
5882
5883 data = kzalloc(sizeof(*data), GFP_KERNEL);
5884 if (!data)
5885 return -ENOMEM;
5886
5887 /* activate ASID on the given handle */
5888 data->handle = handle;
5889 data->asid = asid;
5890 ret = sev_guest_activate(data, error);
5891 kfree(data);
5892
5893 return ret;
5894}
5895
Brijesh Singh89c50582017-12-04 10:57:35 -06005896static int __sev_issue_cmd(int fd, int id, void *data, int *error)
Brijesh Singh59414c92017-12-04 10:57:35 -06005897{
5898 struct fd f;
5899 int ret;
5900
5901 f = fdget(fd);
5902 if (!f.file)
5903 return -EBADF;
5904
5905 ret = sev_issue_cmd_external_user(f.file, id, data, error);
5906
5907 fdput(f);
5908 return ret;
5909}
5910
Brijesh Singh89c50582017-12-04 10:57:35 -06005911static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
5912{
5913 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5914
5915 return __sev_issue_cmd(sev->fd, id, data, error);
5916}
5917
Brijesh Singh59414c92017-12-04 10:57:35 -06005918static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
5919{
5920 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5921 struct sev_data_launch_start *start;
5922 struct kvm_sev_launch_start params;
5923 void *dh_blob, *session_blob;
5924 int *error = &argp->error;
5925 int ret;
5926
5927 if (!sev_guest(kvm))
5928 return -ENOTTY;
5929
5930 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
5931 return -EFAULT;
5932
5933 start = kzalloc(sizeof(*start), GFP_KERNEL);
5934 if (!start)
5935 return -ENOMEM;
5936
5937 dh_blob = NULL;
5938 if (params.dh_uaddr) {
5939 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
5940 if (IS_ERR(dh_blob)) {
5941 ret = PTR_ERR(dh_blob);
5942 goto e_free;
5943 }
5944
5945 start->dh_cert_address = __sme_set(__pa(dh_blob));
5946 start->dh_cert_len = params.dh_len;
5947 }
5948
5949 session_blob = NULL;
5950 if (params.session_uaddr) {
5951 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
5952 if (IS_ERR(session_blob)) {
5953 ret = PTR_ERR(session_blob);
5954 goto e_free_dh;
5955 }
5956
5957 start->session_address = __sme_set(__pa(session_blob));
5958 start->session_len = params.session_len;
5959 }
5960
5961 start->handle = params.handle;
5962 start->policy = params.policy;
5963
5964 /* create memory encryption context */
Brijesh Singh89c50582017-12-04 10:57:35 -06005965 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
Brijesh Singh59414c92017-12-04 10:57:35 -06005966 if (ret)
5967 goto e_free_session;
5968
5969 /* Bind ASID to this guest */
5970 ret = sev_bind_asid(kvm, start->handle, error);
5971 if (ret)
5972 goto e_free_session;
5973
5974 /* return handle to userspace */
5975 params.handle = start->handle;
5976 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
5977 sev_unbind_asid(kvm, start->handle);
5978 ret = -EFAULT;
5979 goto e_free_session;
5980 }
5981
5982 sev->handle = start->handle;
5983 sev->fd = argp->sev_fd;
5984
5985e_free_session:
5986 kfree(session_blob);
5987e_free_dh:
5988 kfree(dh_blob);
5989e_free:
5990 kfree(start);
5991 return ret;
5992}
5993
Brijesh Singh89c50582017-12-04 10:57:35 -06005994static int get_num_contig_pages(int idx, struct page **inpages,
5995 unsigned long npages)
5996{
5997 unsigned long paddr, next_paddr;
5998 int i = idx + 1, pages = 1;
5999
6000 /* find the number of contiguous pages starting from idx */
6001 paddr = __sme_page_pa(inpages[idx]);
6002 while (i < npages) {
6003 next_paddr = __sme_page_pa(inpages[i++]);
6004 if ((paddr + PAGE_SIZE) == next_paddr) {
6005 pages++;
6006 paddr = next_paddr;
6007 continue;
6008 }
6009 break;
6010 }
6011
6012 return pages;
6013}
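
/*
 * Worked example (illustrative): for pinned pages at system-physical
 * addresses 0x1000, 0x2000 and 0x5000, get_num_contig_pages(0, ...)
 * returns 2 (0x1000 and 0x2000 are adjacent, 0x5000 is not), so the
 * first LAUNCH_UPDATE_DATA command below covers exactly two pages.
 */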
6014
6015static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
6016{
6017 unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
6018 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6019 struct kvm_sev_launch_update_data params;
6020 struct sev_data_launch_update_data *data;
6021 struct page **inpages;
6022 int i, ret, pages;
6023
6024 if (!sev_guest(kvm))
6025 return -ENOTTY;
6026
6027 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6028 return -EFAULT;
6029
6030 data = kzalloc(sizeof(*data), GFP_KERNEL);
6031 if (!data)
6032 return -ENOMEM;
6033
6034 vaddr = params.uaddr;
6035 size = params.len;
6036 vaddr_end = vaddr + size;
6037
6038 /* Lock the user memory. */
6039 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
6040 if (!inpages) {
6041 ret = -ENOMEM;
6042 goto e_free;
6043 }
6044
6045 /*
6046 * The LAUNCH_UPDATE command will perform in-place encryption of the
 6047	 * memory content (i.e. it will write the same memory region with C=1).
 6048	 * It's possible that the cache may contain the data with C=0, i.e.,
 6049	 * unencrypted, so invalidate it first.
6050 */
6051 sev_clflush_pages(inpages, npages);
6052
6053 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6054 int offset, len;
6055
6056 /*
6057 * If the user buffer is not page-aligned, calculate the offset
6058 * within the page.
6059 */
6060 offset = vaddr & (PAGE_SIZE - 1);
6061
6062 /* Calculate the number of pages that can be encrypted in one go. */
6063 pages = get_num_contig_pages(i, inpages, npages);
6064
6065 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6066
6067 data->handle = sev->handle;
6068 data->len = len;
6069 data->address = __sme_page_pa(inpages[i]) + offset;
6070 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6071 if (ret)
6072 goto e_unpin;
6073
6074 size -= len;
6075 next_vaddr = vaddr + len;
6076 }
6077
6078e_unpin:
6079 /* content of memory is updated, mark pages dirty */
6080 for (i = 0; i < npages; i++) {
6081 set_page_dirty_lock(inpages[i]);
6082 mark_page_accessed(inpages[i]);
6083 }
6084 /* unlock the user pages */
6085 sev_unpin_memory(kvm, inpages, npages);
6086e_free:
6087 kfree(data);
6088 return ret;
6089}
6090
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006091static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6092{
6093 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6094 struct sev_data_launch_measure *data;
6095 struct kvm_sev_launch_measure params;
6096 void *blob = NULL;
6097 int ret;
6098
6099 if (!sev_guest(kvm))
6100 return -ENOTTY;
6101
6102 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6103 return -EFAULT;
6104
6105 data = kzalloc(sizeof(*data), GFP_KERNEL);
6106 if (!data)
6107 return -ENOMEM;
6108
6109 /* User wants to query the blob length */
6110 if (!params.len)
6111 goto cmd;
6112
6113 if (params.uaddr) {
6114 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6115 ret = -EINVAL;
6116 goto e_free;
6117 }
6118
6119 if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
6120 ret = -EFAULT;
6121 goto e_free;
6122 }
6123
6124 ret = -ENOMEM;
6125 blob = kmalloc(params.len, GFP_KERNEL);
6126 if (!blob)
6127 goto e_free;
6128
6129 data->address = __psp_pa(blob);
6130 data->len = params.len;
6131 }
6132
6133cmd:
6134 data->handle = sev->handle;
6135 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6136
6137 /*
 6138	 * If userspace only queried the blob length, the firmware has filled in the required length in data->len; return it.
6139 */
6140 if (!params.len)
6141 goto done;
6142
6143 if (ret)
6144 goto e_free_blob;
6145
6146 if (blob) {
6147 if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
6148 ret = -EFAULT;
6149 }
6150
6151done:
6152 params.len = data->len;
6153 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6154 ret = -EFAULT;
6155e_free_blob:
6156 kfree(blob);
6157e_free:
6158 kfree(data);
6159 return ret;
6160}
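
/*
 * Usage note (descriptive, inferred from the code above): userspace
 * typically invokes KVM_SEV_LAUNCH_MEASURE twice - once with len = 0
 * so the firmware reports the required blob length in params.len, and
 * a second time with a buffer of that size to fetch the measurement.
 */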
6161
Brijesh Singh5bdb0e22017-12-04 10:57:36 -06006162static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6163{
6164 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6165 struct sev_data_launch_finish *data;
6166 int ret;
6167
6168 if (!sev_guest(kvm))
6169 return -ENOTTY;
6170
6171 data = kzalloc(sizeof(*data), GFP_KERNEL);
6172 if (!data)
6173 return -ENOMEM;
6174
6175 data->handle = sev->handle;
6176 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6177
6178 kfree(data);
6179 return ret;
6180}
6181
Brijesh Singh255d9e72017-12-04 10:57:37 -06006182static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6183{
6184 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6185 struct kvm_sev_guest_status params;
6186 struct sev_data_guest_status *data;
6187 int ret;
6188
6189 if (!sev_guest(kvm))
6190 return -ENOTTY;
6191
6192 data = kzalloc(sizeof(*data), GFP_KERNEL);
6193 if (!data)
6194 return -ENOMEM;
6195
6196 data->handle = sev->handle;
6197 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6198 if (ret)
6199 goto e_free;
6200
6201 params.policy = data->policy;
6202 params.state = data->state;
6203 params.handle = data->handle;
6204
6205 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6206 ret = -EFAULT;
6207e_free:
6208 kfree(data);
6209 return ret;
6210}
6211
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006212static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
6213 unsigned long dst, int size,
6214 int *error, bool enc)
6215{
6216 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6217 struct sev_data_dbg *data;
6218 int ret;
6219
6220 data = kzalloc(sizeof(*data), GFP_KERNEL);
6221 if (!data)
6222 return -ENOMEM;
6223
6224 data->handle = sev->handle;
6225 data->dst_addr = dst;
6226 data->src_addr = src;
6227 data->len = size;
6228
6229 ret = sev_issue_cmd(kvm,
6230 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
6231 data, error);
6232 kfree(data);
6233 return ret;
6234}
6235
6236static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
6237 unsigned long dst_paddr, int sz, int *err)
6238{
6239 int offset;
6240
6241 /*
 6242	 * It's safe to read more than we were asked for; the caller must
 6243	 * ensure that the destination has enough space.
6244 */
6245 src_paddr = round_down(src_paddr, 16);
6246 offset = src_paddr & 15;
6247 sz = round_up(sz + offset, 16);
6248
6249 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
6250}
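
/*
 * Worked example (illustrative): for src_paddr = 0x1009 and sz = 20,
 * the address is rounded down to 0x1000 with offset = 9, and sz is
 * rounded up to 32 (20 + 9 = 29, next multiple of 16), so the whole
 * 16-byte-aligned span covering the request is decrypted.
 */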
6251
6252static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
6253 unsigned long __user dst_uaddr,
6254 unsigned long dst_paddr,
6255 int size, int *err)
6256{
6257 struct page *tpage = NULL;
6258 int ret, offset;
6259
 6260	/* if inputs are not 16-byte aligned then use an intermediate buffer */
6261 if (!IS_ALIGNED(dst_paddr, 16) ||
6262 !IS_ALIGNED(paddr, 16) ||
6263 !IS_ALIGNED(size, 16)) {
6264 tpage = (void *)alloc_page(GFP_KERNEL);
6265 if (!tpage)
6266 return -ENOMEM;
6267
6268 dst_paddr = __sme_page_pa(tpage);
6269 }
6270
6271 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
6272 if (ret)
6273 goto e_free;
6274
6275 if (tpage) {
6276 offset = paddr & 15;
6277 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
6278 page_address(tpage) + offset, size))
6279 ret = -EFAULT;
6280 }
6281
6282e_free:
6283 if (tpage)
6284 __free_page(tpage);
6285
6286 return ret;
6287}
6288
Brijesh Singh7d1594f2017-12-04 10:57:37 -06006289static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
6290 unsigned long __user vaddr,
6291 unsigned long dst_paddr,
6292 unsigned long __user dst_vaddr,
6293 int size, int *error)
6294{
6295 struct page *src_tpage = NULL;
6296 struct page *dst_tpage = NULL;
6297 int ret, len = size;
6298
6299 /* If source buffer is not aligned then use an intermediate buffer */
6300 if (!IS_ALIGNED(vaddr, 16)) {
6301 src_tpage = alloc_page(GFP_KERNEL);
6302 if (!src_tpage)
6303 return -ENOMEM;
6304
6305 if (copy_from_user(page_address(src_tpage),
6306 (void __user *)(uintptr_t)vaddr, size)) {
6307 __free_page(src_tpage);
6308 return -EFAULT;
6309 }
6310
6311 paddr = __sme_page_pa(src_tpage);
6312 }
6313
6314 /*
6315 * If destination buffer or length is not aligned then do read-modify-write:
6316 * - decrypt destination in an intermediate buffer
6317 * - copy the source buffer in an intermediate buffer
6318 * - use the intermediate buffer as source buffer
6319 */
6320 if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
6321 int dst_offset;
6322
6323 dst_tpage = alloc_page(GFP_KERNEL);
6324 if (!dst_tpage) {
6325 ret = -ENOMEM;
6326 goto e_free;
6327 }
6328
6329 ret = __sev_dbg_decrypt(kvm, dst_paddr,
6330 __sme_page_pa(dst_tpage), size, error);
6331 if (ret)
6332 goto e_free;
6333
6334 /*
 6335		 * If the source was staged in an intermediate kernel page
 6336		 * above, use memcpy(); otherwise use copy_from_user().
6337 */
6338 dst_offset = dst_paddr & 15;
6339
6340 if (src_tpage)
6341 memcpy(page_address(dst_tpage) + dst_offset,
6342 page_address(src_tpage), size);
6343 else {
6344 if (copy_from_user(page_address(dst_tpage) + dst_offset,
6345 (void __user *)(uintptr_t)vaddr, size)) {
6346 ret = -EFAULT;
6347 goto e_free;
6348 }
6349 }
6350
6351 paddr = __sme_page_pa(dst_tpage);
6352 dst_paddr = round_down(dst_paddr, 16);
6353 len = round_up(size, 16);
6354 }
6355
6356 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
6357
6358e_free:
6359 if (src_tpage)
6360 __free_page(src_tpage);
6361 if (dst_tpage)
6362 __free_page(dst_tpage);
6363 return ret;
6364}
6365
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006366static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6367{
6368 unsigned long vaddr, vaddr_end, next_vaddr;
6369 unsigned long dst_vaddr, dst_vaddr_end;
6370 struct page **src_p, **dst_p;
6371 struct kvm_sev_dbg debug;
6372 unsigned long n;
6373 int ret, size;
6374
6375 if (!sev_guest(kvm))
6376 return -ENOTTY;
6377
6378 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6379 return -EFAULT;
6380
6381 vaddr = debug.src_uaddr;
6382 size = debug.len;
6383 vaddr_end = vaddr + size;
6384 dst_vaddr = debug.dst_uaddr;
6385 dst_vaddr_end = dst_vaddr + size;
6386
6387 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6388 int len, s_off, d_off;
6389
6390 /* lock userspace source and destination page */
6391 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
6392 if (!src_p)
6393 return -EFAULT;
6394
6395 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
6396 if (!dst_p) {
6397 sev_unpin_memory(kvm, src_p, n);
6398 return -EFAULT;
6399 }
6400
6401 /*
6402 * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
 6403		 * memory content (i.e. it will write the same memory region with C=1).
 6404		 * It's possible that the cache may contain the data with C=0, i.e.,
 6405		 * unencrypted, so invalidate it first.
6406 */
6407 sev_clflush_pages(src_p, 1);
6408 sev_clflush_pages(dst_p, 1);
6409
6410 /*
 6411		 * Since the user buffers may not be page-aligned, calculate
 6412		 * the offset within the page.
6413 */
6414 s_off = vaddr & ~PAGE_MASK;
6415 d_off = dst_vaddr & ~PAGE_MASK;
6416 len = min_t(size_t, (PAGE_SIZE - s_off), size);
6417
Brijesh Singh7d1594f2017-12-04 10:57:37 -06006418 if (dec)
6419 ret = __sev_dbg_decrypt_user(kvm,
6420 __sme_page_pa(src_p[0]) + s_off,
6421 dst_vaddr,
6422 __sme_page_pa(dst_p[0]) + d_off,
6423 len, &argp->error);
6424 else
6425 ret = __sev_dbg_encrypt_user(kvm,
6426 __sme_page_pa(src_p[0]) + s_off,
6427 vaddr,
6428 __sme_page_pa(dst_p[0]) + d_off,
6429 dst_vaddr,
6430 len, &argp->error);
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006431
6432 sev_unpin_memory(kvm, src_p, 1);
6433 sev_unpin_memory(kvm, dst_p, 1);
6434
6435 if (ret)
6436 goto err;
6437
6438 next_vaddr = vaddr + len;
6439 dst_vaddr = dst_vaddr + len;
6440 size -= len;
6441 }
6442err:
6443 return ret;
6444}
6445
Brijesh Singh9f5b5b92017-12-04 10:57:38 -06006446static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
6447{
6448 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6449 struct sev_data_launch_secret *data;
6450 struct kvm_sev_launch_secret params;
6451 struct page **pages;
6452 void *blob, *hdr;
6453 unsigned long n;
6454 int ret;
6455
6456 if (!sev_guest(kvm))
6457 return -ENOTTY;
6458
6459 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6460 return -EFAULT;
6461
6462 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
6463 if (!pages)
6464 return -ENOMEM;
6465
6466 /*
 6467	 * The secret must be copied into a contiguous memory region, so verify
 6468	 * that the userspace memory pages are contiguous before issuing the command.
6469 */
6470 if (get_num_contig_pages(0, pages, n) != n) {
6471 ret = -EINVAL;
6472 goto e_unpin_memory;
6473 }
6474
6475 ret = -ENOMEM;
6476 data = kzalloc(sizeof(*data), GFP_KERNEL);
6477 if (!data)
6478 goto e_unpin_memory;
6479
6480 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
6481 if (IS_ERR(blob)) {
6482 ret = PTR_ERR(blob);
6483 goto e_free;
6484 }
6485
6486 data->trans_address = __psp_pa(blob);
6487 data->trans_len = params.trans_len;
6488
6489 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
6490 if (IS_ERR(hdr)) {
6491 ret = PTR_ERR(hdr);
6492 goto e_free_blob;
6493 }
 6494	data->hdr_address = __psp_pa(hdr);
 6495	data->hdr_len = params.hdr_len;
6496
6497 data->handle = sev->handle;
6498 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
6499
6500 kfree(hdr);
6501
6502e_free_blob:
6503 kfree(blob);
6504e_free:
6505 kfree(data);
6506e_unpin_memory:
6507 sev_unpin_memory(kvm, pages, n);
6508 return ret;
6509}
6510
Brijesh Singh1654efc2017-12-04 10:57:34 -06006511static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
6512{
6513 struct kvm_sev_cmd sev_cmd;
6514 int r;
6515
6516 if (!svm_sev_enabled())
6517 return -ENOTTY;
6518
6519 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
6520 return -EFAULT;
6521
6522 mutex_lock(&kvm->lock);
6523
6524 switch (sev_cmd.id) {
6525 case KVM_SEV_INIT:
6526 r = sev_guest_init(kvm, &sev_cmd);
6527 break;
Brijesh Singh59414c92017-12-04 10:57:35 -06006528 case KVM_SEV_LAUNCH_START:
6529 r = sev_launch_start(kvm, &sev_cmd);
6530 break;
Brijesh Singh89c50582017-12-04 10:57:35 -06006531 case KVM_SEV_LAUNCH_UPDATE_DATA:
6532 r = sev_launch_update_data(kvm, &sev_cmd);
6533 break;
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006534 case KVM_SEV_LAUNCH_MEASURE:
6535 r = sev_launch_measure(kvm, &sev_cmd);
6536 break;
Brijesh Singh5bdb0e22017-12-04 10:57:36 -06006537 case KVM_SEV_LAUNCH_FINISH:
6538 r = sev_launch_finish(kvm, &sev_cmd);
6539 break;
Brijesh Singh255d9e72017-12-04 10:57:37 -06006540 case KVM_SEV_GUEST_STATUS:
6541 r = sev_guest_status(kvm, &sev_cmd);
6542 break;
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006543 case KVM_SEV_DBG_DECRYPT:
6544 r = sev_dbg_crypt(kvm, &sev_cmd, true);
6545 break;
Brijesh Singh7d1594f2017-12-04 10:57:37 -06006546 case KVM_SEV_DBG_ENCRYPT:
6547 r = sev_dbg_crypt(kvm, &sev_cmd, false);
6548 break;
Brijesh Singh9f5b5b92017-12-04 10:57:38 -06006549 case KVM_SEV_LAUNCH_SECRET:
6550 r = sev_launch_secret(kvm, &sev_cmd);
6551 break;
Brijesh Singh1654efc2017-12-04 10:57:34 -06006552 default:
6553 r = -EINVAL;
6554 goto out;
6555 }
6556
6557 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
6558 r = -EFAULT;
6559
6560out:
6561 mutex_unlock(&kvm->lock);
6562 return r;
6563}
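
/*
 * Hypothetical userspace sketch (illustrative; not part of this file)
 * showing how these commands are reached. It assumes a VM fd obtained
 * via KVM_CREATE_VM and the uapi layout of struct kvm_sev_cmd:
 *
 *	int sev_fd = open("/dev/sev", O_RDWR);
 *	struct kvm_sev_cmd cmd = {
 *		.id = KVM_SEV_INIT,
 *		.sev_fd = sev_fd,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
 *		fprintf(stderr, "SEV init failed, fw error %u\n", cmd.error);
 */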
6564
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06006565static int svm_register_enc_region(struct kvm *kvm,
6566 struct kvm_enc_region *range)
6567{
6568 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6569 struct enc_region *region;
6570 int ret = 0;
6571
6572 if (!sev_guest(kvm))
6573 return -ENOTTY;
6574
6575 region = kzalloc(sizeof(*region), GFP_KERNEL);
6576 if (!region)
6577 return -ENOMEM;
6578
6579 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
6580 if (!region->pages) {
6581 ret = -ENOMEM;
6582 goto e_free;
6583 }
6584
6585 /*
6586 * The guest may change the memory encryption attribute from C=0 -> C=1
 6587	 * or vice versa for this memory range. Flush the caches to ensure
 6588	 * that guest data gets written into memory with the correct C-bit.
6590 */
6591 sev_clflush_pages(region->pages, region->npages);
6592
6593 region->uaddr = range->addr;
6594 region->size = range->size;
6595
6596 mutex_lock(&kvm->lock);
6597 list_add_tail(&region->list, &sev->regions_list);
6598 mutex_unlock(&kvm->lock);
6599
6600 return ret;
6601
6602e_free:
6603 kfree(region);
6604 return ret;
6605}
6606
6607static struct enc_region *
6608find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
6609{
6610 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6611 struct list_head *head = &sev->regions_list;
6612 struct enc_region *i;
6613
6614 list_for_each_entry(i, head, list) {
6615 if (i->uaddr == range->addr &&
6616 i->size == range->size)
6617 return i;
6618 }
6619
6620 return NULL;
6621}
 6622
6624static int svm_unregister_enc_region(struct kvm *kvm,
6625 struct kvm_enc_region *range)
6626{
6627 struct enc_region *region;
6628 int ret;
6629
6630 mutex_lock(&kvm->lock);
6631
6632 if (!sev_guest(kvm)) {
6633 ret = -ENOTTY;
6634 goto failed;
6635 }
6636
6637 region = find_enc_region(kvm, range);
6638 if (!region) {
6639 ret = -EINVAL;
6640 goto failed;
6641 }
6642
6643 __unregister_enc_region_locked(kvm, region);
6644
6645 mutex_unlock(&kvm->lock);
6646 return 0;
6647
6648failed:
6649 mutex_unlock(&kvm->lock);
6650 return ret;
6651}
6652
Kees Cook404f6aa2016-08-08 16:29:06 -07006653static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08006654 .cpu_has_kvm_support = has_svm,
6655 .disabled_by_bios = is_disabled,
6656 .hardware_setup = svm_hardware_setup,
6657 .hardware_unsetup = svm_hardware_unsetup,
Yang, Sheng002c7f72007-07-31 14:23:01 +03006658 .check_processor_compatibility = svm_check_processor_compat,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006659 .hardware_enable = svm_hardware_enable,
6660 .hardware_disable = svm_hardware_disable,
Avi Kivity774ead32007-12-26 13:57:04 +02006661 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
Paolo Bonzini6d396b52015-04-01 14:25:33 +02006662 .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006663
6664 .vcpu_create = svm_create_vcpu,
6665 .vcpu_free = svm_free_vcpu,
Avi Kivity04d2cc72007-09-10 18:10:54 +03006666 .vcpu_reset = svm_vcpu_reset,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006667
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006668 .vm_init = avic_vm_init,
Brijesh Singh1654efc2017-12-04 10:57:34 -06006669 .vm_destroy = svm_vm_destroy,
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006670
Avi Kivity04d2cc72007-09-10 18:10:54 +03006671 .prepare_guest_switch = svm_prepare_guest_switch,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006672 .vcpu_load = svm_vcpu_load,
6673 .vcpu_put = svm_vcpu_put,
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05006674 .vcpu_blocking = svm_vcpu_blocking,
6675 .vcpu_unblocking = svm_vcpu_unblocking,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006676
Paolo Bonzinia96036b2015-11-10 11:55:36 +01006677 .update_bp_intercept = update_bp_intercept,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006678 .get_msr = svm_get_msr,
6679 .set_msr = svm_set_msr,
6680 .get_segment_base = svm_get_segment_base,
6681 .get_segment = svm_get_segment,
6682 .set_segment = svm_set_segment,
Izik Eidus2e4d2652008-03-24 19:38:34 +02006683 .get_cpl = svm_get_cpl,
Rusty Russell1747fb72007-09-06 01:21:32 +10006684 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
Avi Kivitye8467fd2009-12-29 18:43:06 +02006685 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
Avi Kivityaff48ba2010-12-05 18:56:11 +02006686 .decache_cr3 = svm_decache_cr3,
Anthony Liguori25c4c272007-04-27 09:29:21 +03006687 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006688 .set_cr0 = svm_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006689 .set_cr3 = svm_set_cr3,
6690 .set_cr4 = svm_set_cr4,
6691 .set_efer = svm_set_efer,
6692 .get_idt = svm_get_idt,
6693 .set_idt = svm_set_idt,
6694 .get_gdt = svm_get_gdt,
6695 .set_gdt = svm_set_gdt,
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01006696 .get_dr6 = svm_get_dr6,
6697 .set_dr6 = svm_set_dr6,
Gleb Natapov020df072010-04-13 10:05:23 +03006698 .set_dr7 = svm_set_dr7,
Paolo Bonzinifacb0132014-02-21 10:32:27 +01006699 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
Avi Kivity6de4f3a2009-05-31 22:58:47 +03006700 .cache_reg = svm_cache_reg,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006701 .get_rflags = svm_get_rflags,
6702 .set_rflags = svm_set_rflags,
Huaitong Hanbe94f6b2016-03-22 16:51:20 +08006703
Avi Kivity6aa8b732006-12-10 02:21:36 -08006704 .tlb_flush = svm_flush_tlb,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006705
Avi Kivity6aa8b732006-12-10 02:21:36 -08006706 .run = svm_vcpu_run,
Avi Kivity04d2cc72007-09-10 18:10:54 +03006707 .handle_exit = handle_exit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006708 .skip_emulated_instruction = skip_emulated_instruction,
Glauber Costa2809f5d2009-05-12 16:21:05 -04006709 .set_interrupt_shadow = svm_set_interrupt_shadow,
6710 .get_interrupt_shadow = svm_get_interrupt_shadow,
Ingo Molnar102d8322007-02-19 14:37:47 +02006711 .patch_hypercall = svm_patch_hypercall,
Eddie Dong2a8067f2007-08-06 16:29:07 +03006712 .set_irq = svm_set_irq,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006713 .set_nmi = svm_inject_nmi,
Avi Kivity298101d2007-11-25 13:41:11 +02006714 .queue_exception = svm_queue_exception,
Avi Kivityb463a6f2010-07-20 15:06:17 +03006715 .cancel_injection = svm_cancel_injection,
Gleb Natapov78646122009-03-23 12:12:11 +02006716 .interrupt_allowed = svm_interrupt_allowed,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006717 .nmi_allowed = svm_nmi_allowed,
Jan Kiszka3cfc3092009-11-12 01:04:25 +01006718 .get_nmi_mask = svm_get_nmi_mask,
6719 .set_nmi_mask = svm_set_nmi_mask,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006720 .enable_nmi_window = enable_nmi_window,
6721 .enable_irq_window = enable_irq_window,
6722 .update_cr8_intercept = update_cr8_intercept,
Yang Zhang8d146952013-01-25 10:18:50 +08006723 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
Andrey Smetanind62caab2015-11-10 15:36:33 +03006724 .get_enable_apicv = svm_get_enable_apicv,
6725 .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
Yang Zhangc7c9c562013-01-25 10:18:51 +08006726 .load_eoi_exitmap = svm_load_eoi_exitmap,
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006727 .hwapic_irr_update = svm_hwapic_irr_update,
6728 .hwapic_isr_update = svm_hwapic_isr_update,
Suravee Suthikulpanitbe8ca172016-05-04 14:09:49 -05006729 .apicv_post_state_restore = avic_post_state_restore,
Izik Eiduscbc94022007-10-25 00:29:55 +02006730
6731 .set_tss_addr = svm_set_tss_addr,
Sheng Yang67253af2008-04-25 10:20:22 +08006732 .get_tdp_level = get_npt_level,
Sheng Yang4b12f0d2009-04-27 20:35:42 +08006733 .get_mt_mask = svm_get_mt_mask,
Marcelo Tosatti229456f2009-06-17 09:22:14 -03006734
Avi Kivity586f9602010-11-18 13:09:54 +02006735 .get_exit_info = svm_get_exit_info,
Avi Kivity586f9602010-11-18 13:09:54 +02006736
Sheng Yang17cc3932010-01-05 19:02:27 +08006737 .get_lpage_level = svm_get_lpage_level,
Sheng Yang0e851882009-12-18 16:48:46 +08006738
6739 .cpuid_update = svm_cpuid_update,
Sheng Yang4e47c7a2009-12-18 16:48:47 +08006740
6741 .rdtscp_supported = svm_rdtscp_supported,
Mao, Junjiead756a12012-07-02 01:18:48 +00006742 .invpcid_supported = svm_invpcid_supported,
Paolo Bonzini93c4adc2014-03-05 23:19:52 +01006743 .mpx_supported = svm_mpx_supported,
Wanpeng Li55412b22014-12-02 19:21:30 +08006744 .xsaves_supported = svm_xsaves_supported,
Joerg Roedeld4330ef2010-04-22 12:33:11 +02006745
6746 .set_supported_cpuid = svm_set_supported_cpuid,
Sheng Yangf5f48ee2010-06-30 12:25:15 +08006747
6748 .has_wbinvd_exit = svm_has_wbinvd_exit,
Zachary Amsden99e3e302010-08-19 22:07:17 -10006749
6750 .write_tsc_offset = svm_write_tsc_offset,
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02006751
6752 .set_tdp_cr3 = set_tdp_cr3,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02006753
6754 .check_intercept = svm_check_intercept,
Yang Zhanga547c6d2013-04-11 19:25:10 +08006755 .handle_external_intr = svm_handle_external_intr,
Radim Krčmářae97a3b2014-08-21 18:08:06 +02006756
6757 .sched_in = svm_sched_in,
Wei Huang25462f72015-06-19 15:45:05 +02006758
6759 .pmu_ops = &amd_pmu_ops,
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05006760 .deliver_posted_interrupt = svm_deliver_avic_intr,
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05006761 .update_pi_irte = svm_update_pi_irte,
Borislav Petkov74f16902017-03-26 23:51:24 +02006762 .setup_mce = svm_setup_mce,
Ladi Prosek0234bf82017-10-11 16:54:40 +02006763
Ladi Prosek72d7b372017-10-11 16:54:41 +02006764 .smi_allowed = svm_smi_allowed,
Ladi Prosek0234bf82017-10-11 16:54:40 +02006765 .pre_enter_smm = svm_pre_enter_smm,
6766 .pre_leave_smm = svm_pre_leave_smm,
Ladi Prosekcc3d9672017-10-17 16:02:39 +02006767 .enable_smi_window = enable_smi_window,
Brijesh Singh1654efc2017-12-04 10:57:34 -06006768
6769 .mem_enc_op = svm_mem_enc_op,
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06006770 .mem_enc_reg_region = svm_register_enc_region,
6771 .mem_enc_unreg_region = svm_unregister_enc_region,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006772};
6773
6774static int __init svm_init(void)
6775{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08006776 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
Avi Kivity0ee75be2010-04-28 15:39:01 +03006777 __alignof__(struct vcpu_svm), THIS_MODULE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08006778}
6779
6780static void __exit svm_exit(void)
6781{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08006782 kvm_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08006783}
6784
6785module_init(svm_init)
6786module_exit(svm_exit)