/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

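/* SVM feature bits, as reported by CPUID Fn8000_000A EDX */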
#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define SVM_AVIC_DOORBELL	0xc001011b

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD		0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
					 (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)

119
Joerg Roedel67ec6602010-05-17 14:43:35 +0200120static bool erratum_383_found __read_mostly;
121
Avi Kivity6c8166a2009-05-31 18:15:37 +0300122static const u32 host_save_user_msrs[] = {
123#ifdef CONFIG_X86_64
124 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
125 MSR_FS_BASE,
126#endif
127 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
Paolo Bonzini46896c72015-11-12 14:49:16 +0100128 MSR_TSC_AUX,
Avi Kivity6c8166a2009-05-31 18:15:37 +0300129};
130
131#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
132
struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* which host CPU was used for running this vcpu */
	unsigned int last_cpu;
};

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID			0xffffffffU

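/*
 * MSRs in this list can be accessed directly by the guest, i.e. their
 * read/write intercept bits in the MSR permission map are cleared.
 * Entries marked .always are passed through from vcpu creation on; the
 * LBR MSRs are passed through only while LBR virtualization is enabled
 * (see svm_enable_lbrv()/svm_disable_lbrv()).
 */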
static const struct svm_direct_access_msrs {
	u32 index;	/* Index of the MSR */
	bool always;	/* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable / disable AVIC */
static int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

static inline bool svm_sev_enabled(void)
{
	return max_sev_asid;
}

static inline bool sev_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->active;
}

static inline int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->asid;
}

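/*
 * VMCB "clean" bits allow the CPU to cache VMCB fields across VMRUNs:
 * a set bit tells the hardware that the corresponding group of fields
 * (see the VMCB_* enum above) has not been modified since the last
 * VMRUN.
 */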
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

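/*
 * While L2 is running, L1's intercepts must stay in effect, so the
 * active VMCB carries the union of the host (L1) and guest (L2)
 * intercept masks.
 */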
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

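/*
 * GIF (Global Interrupt Flag) masks interrupts, including NMIs, while
 * it is clear.  Without hardware vGIF support, KVM tracks the guest's
 * GIF in hflags; with vGIF, the CPU keeps it in the VMCB int_ctl field.
 */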
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

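/*
 * The MSR permission map covers the three MSR ranges in msrpm_ranges.
 * Each range occupies 2K of bitmap, and every MSR takes two adjacent
 * bits in it (intercept read, intercept write).  This helper converts
 * an MSR number into a u32 offset into the map.
 */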
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

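/*
 * On CPUs with the NRIPS feature, hardware supplies the address of the
 * next instruction in the VMCB on instruction intercepts; without it,
 * the instruction must be decoded by the emulator to be skipped.
 */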
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	bool reinject = vcpu->arch.exception.injected;
	u32 error_code = vcpu->arch.exception.error_code;

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

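/*
 * Erratum 383 (X86_BUG_AMD_TLB_MMATCH) can raise a machine check on a
 * TLB multi-match; enable the workaround by setting bit 47 in
 * MSR_AMD64_DC_CFG.
 */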
static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
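	/* ASIDs up to and including max_sev_asid are reserved for SEV guests */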
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	r = -ENOMEM;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto err_1;

	if (svm_sev_enabled()) {
		r = -ENOMEM;
		sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto err_1;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;
}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

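	/* Each MSR uses two adjacent bits: bit 2n intercepts reads, bit 2n+1 writes */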
	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

/* Note:
 * This hash table is used to map a VM_ID to a struct kvm_arch when
 * handling an AMD IOMMU GALOG notification, in order to schedule in a
 * particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = false;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from the IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_arch *ka = NULL;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
		struct kvm *kvm = container_of(ka, struct kvm, arch);
		struct kvm_arch *vm_data = &kvm->arch;

		if (vm_data->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/* Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

static __init int sev_hardware_setup(void)
{
	struct sev_user_data_status *status;
	int rc;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = cpuid_ecx(0x8000001F);

	if (!max_sev_asid)
		return 1;

	/* Minimum ASID value that should be used for SEV guests */
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmap */
	sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
				  sizeof(unsigned long), GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

	status = kmalloc(sizeof(*status), GFP_KERNEL);
	if (!status)
		return 1;

	/*
	 * Check the SEV platform status.
	 *
	 * PLATFORM_STATUS can be called in any state.  If we fail to query
	 * the platform status, then either the PSP firmware does not
	 * support the SEV feature or the SEV firmware is dead.
	 */
	rc = sev_platform_status(status, NULL);
	if (rc)
		goto err;

	pr_info("SEV supported\n");

err:
	kfree(status);
	return rc;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (sev) {
		if (boot_cpu_has(X86_FEATURE_SEV) &&
		    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
			r = sev_hardware_setup();
			if (r)
				sev = false;
		} else {
			sev = false;
		}
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	if (svm_sev_enabled())
		kfree(sev_asid_bitmap);

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

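/*
 * Point the VMCB at the vAPIC backing page and the physical/logical
 * APIC ID tables, so that the CPU can handle most guest APIC accesses
 * without a #VMEXIT.
 */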
static void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
	phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
	phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_XSETBV);

	if (!kvm_mwait_in_guest()) {
		set_intercept(svm, INTERCEPT_MONITOR);
		set_intercept(svm, INTERCEPT_MWAIT);
	}

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

Avi Kivity2a6b20b2010-11-09 16:15:42 +02001422 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
Mark Langsdorf565d0992009-10-06 14:25:02 -05001423 control->pause_filter_count = 3000;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001424 set_intercept(svm, INTERCEPT_PAUSE);
Mark Langsdorf565d0992009-10-06 14:25:02 -05001425 }
1426
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05001427 if (kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001428 avic_init_vmcb(svm);
1429
Janakarajan Natarajan89c8a492017-07-06 15:50:47 -05001430 /*
1431 * If hardware supports Virtual VMLOAD VMSAVE then enable it
1432 * in VMCB and clear intercepts to avoid #VMEXIT.
1433 */
1434 if (vls) {
1435 clr_intercept(svm, INTERCEPT_VMLOAD);
1436 clr_intercept(svm, INTERCEPT_VMSAVE);
1437 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1438 }
1439
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05001440 if (vgif) {
1441 clr_intercept(svm, INTERCEPT_STGI);
1442 clr_intercept(svm, INTERCEPT_CLGI);
1443 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1444 }
1445
Brijesh Singh1654efc2017-12-04 10:57:34 -06001446 if (sev_guest(svm->vcpu.kvm))
1447 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
1448
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001449 mark_all_dirty(svm->vmcb);
1450
Joerg Roedel2af91942009-08-07 11:49:28 +02001451 enable_gif(svm);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001452
1453}
1454
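/*
 * Look up this vcpu's slot in the physical APIC ID table; returns NULL
 * when the index exceeds what AVIC hardware can address.
 */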
Dan Carpenterd3e7dec2017-05-18 10:38:53 +03001455static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1456 unsigned int index)
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001457{
1458 u64 *avic_physical_id_table;
1459 struct kvm_arch *vm_data = &vcpu->kvm->arch;
1460
1461 if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
1462 return NULL;
1463
1464 avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);
1465
1466 return &avic_physical_id_table[index];
1467}
1468
1469/**
1470 * Note:
1471 * AVIC hardware walks the nested page table to check permissions,
1472 * but does not use the SPA address specified in the leaf page
 1473 * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
1474 * field of the VMCB. Therefore, we set up the
1475 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
1476 */
1477static int avic_init_access_page(struct kvm_vcpu *vcpu)
1478{
1479 struct kvm *kvm = vcpu->kvm;
1480 int ret;
1481
1482 if (kvm->arch.apic_access_page_done)
1483 return 0;
1484
1485 ret = x86_set_memory_region(kvm,
1486 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1487 APIC_DEFAULT_PHYS_BASE,
1488 PAGE_SIZE);
1489 if (ret)
1490 return ret;
1491
1492 kvm->arch.apic_access_page_done = true;
1493 return 0;
1494}
1495
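/*
 * Register the vcpu's in-kernel APIC register page as its AVIC backing
 * page and publish it, with the valid bit set, in the physical APIC ID
 * table so the hardware knows where to post interrupt state.
 */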
1496static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1497{
1498 int ret;
1499 u64 *entry, new_entry;
1500 int id = vcpu->vcpu_id;
1501 struct vcpu_svm *svm = to_svm(vcpu);
1502
1503 ret = avic_init_access_page(vcpu);
1504 if (ret)
1505 return ret;
1506
1507 if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1508 return -EINVAL;
1509
1510 if (!svm->vcpu.arch.apic->regs)
1511 return -EINVAL;
1512
1513 svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1514
 1515 /* Set the AVIC backing page address in the physical APIC ID table */
1516 entry = avic_get_physical_id_entry(vcpu, id);
1517 if (!entry)
1518 return -EINVAL;
1519
1520 new_entry = READ_ONCE(*entry);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001521 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1522 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1523 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001524 WRITE_ONCE(*entry, new_entry);
1525
1526 svm->avic_physical_id_cache = entry;
1527
1528 return 0;
1529}
1530
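/*
 * Return the ASID to the allocator bitmap and clear every per-CPU
 * cached VMCB pointer for that slot, so a later guest reusing the ASID
 * is not mistaken for the one that just went away.
 */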
Brijesh Singh1654efc2017-12-04 10:57:34 -06001531static void __sev_asid_free(int asid)
1532{
Brijesh Singh70cd94e2017-12-04 10:57:34 -06001533 struct svm_cpu_data *sd;
1534 int cpu, pos;
Brijesh Singh1654efc2017-12-04 10:57:34 -06001535
1536 pos = asid - 1;
1537 clear_bit(pos, sev_asid_bitmap);
Brijesh Singh70cd94e2017-12-04 10:57:34 -06001538
1539 for_each_possible_cpu(cpu) {
1540 sd = per_cpu(svm_data, cpu);
1541 sd->sev_vmcbs[pos] = NULL;
1542 }
Brijesh Singh1654efc2017-12-04 10:57:34 -06001543}
1544
1545static void sev_asid_free(struct kvm *kvm)
1546{
1547 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1548
1549 __sev_asid_free(sev->asid);
1550}
1551
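/*
 * Release the firmware binding for a SEV guest: DEACTIVATE detaches
 * the handle from its ASID, WBINVD plus DF_FLUSH scrub the caches and
 * data fabric before the ASID can be reused, and DECOMMISSION finally
 * retires the handle.
 */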
Brijesh Singh59414c92017-12-04 10:57:35 -06001552static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
1553{
1554 struct sev_data_decommission *decommission;
1555 struct sev_data_deactivate *data;
1556
1557 if (!handle)
1558 return;
1559
1560 data = kzalloc(sizeof(*data), GFP_KERNEL);
1561 if (!data)
1562 return;
1563
1564 /* deactivate handle */
1565 data->handle = handle;
1566 sev_guest_deactivate(data, NULL);
1567
1568 wbinvd_on_all_cpus();
1569 sev_guest_df_flush(NULL);
1570 kfree(data);
1571
1572 decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
1573 if (!decommission)
1574 return;
1575
1576 /* decommission handle */
1577 decommission->handle = handle;
1578 sev_guest_decommission(decommission, NULL);
1579
1580 kfree(decommission);
1581}
1582
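/*
 * Pin a range of guest userspace memory and return the page array.
 * The count is derived from the page-aligned span; for example, with
 * 4K pages, uaddr = 0x1fff and ulen = 10 give first = 1, last = 2 and
 * hence npages = 2 even though ulen is far below PAGE_SIZE. Pinned
 * pages are charged against RLIMIT_MEMLOCK.
 */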
Brijesh Singh89c50582017-12-04 10:57:35 -06001583static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
1584 unsigned long ulen, unsigned long *n,
1585 int write)
1586{
1587 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1588 unsigned long npages, npinned, size;
1589 unsigned long locked, lock_limit;
1590 struct page **pages;
1591 int first, last;
1592
1593 /* Calculate number of pages. */
1594 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
1595 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
1596 npages = (last - first + 1);
1597
1598 locked = sev->pages_locked + npages;
1599 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1600 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
1601 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
1602 return NULL;
1603 }
1604
1605 /* Avoid using vmalloc for smaller buffers. */
1606 size = npages * sizeof(struct page *);
1607 if (size > PAGE_SIZE)
1608 pages = vmalloc(size);
1609 else
1610 pages = kmalloc(size, GFP_KERNEL);
1611
1612 if (!pages)
1613 return NULL;
1614
1615 /* Pin the user virtual address. */
1616 npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
1617 if (npinned != npages) {
1618 pr_err("SEV: Failure locking %lu pages.\n", npages);
1619 goto err;
1620 }
1621
1622 *n = npages;
1623 sev->pages_locked = locked;
1624
1625 return pages;
1626
1627err:
1628 if (npinned > 0)
1629 release_pages(pages, npinned);
1630
1631 kvfree(pages);
1632 return NULL;
1633}
1634
1635static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
1636 unsigned long npages)
1637{
1638 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1639
1640 release_pages(pages, npages);
1641 kvfree(pages);
1642 sev->pages_locked -= npages;
1643}
1644
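/*
 * Flush each pinned page from the CPU caches so no cache line tagged
 * with a stale C-bit state can be written back after the encryption
 * attribute of the range has changed.
 */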
1645static void sev_clflush_pages(struct page *pages[], unsigned long npages)
1646{
1647 uint8_t *page_virtual;
1648 unsigned long i;
1649
1650 if (npages == 0 || pages == NULL)
1651 return;
1652
1653 for (i = 0; i < npages; i++) {
1654 page_virtual = kmap_atomic(pages[i]);
1655 clflush_cache_range(page_virtual, PAGE_SIZE);
1656 kunmap_atomic(page_virtual);
1657 }
1658}
1659
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001660static void __unregister_enc_region_locked(struct kvm *kvm,
1661 struct enc_region *region)
1662{
1663 /*
1664 * The guest may change the memory encryption attribute from C=0 -> C=1
 1665 * or vice versa for this memory range. Let's make sure caches are
 1666 * flushed to ensure that guest data gets written into memory with
 1667 * the correct C-bit.
1668 */
1669 sev_clflush_pages(region->pages, region->npages);
1670
1671 sev_unpin_memory(kvm, region->pages, region->npages);
1672 list_del(&region->list);
1673 kfree(region);
1674}
1675
Brijesh Singh1654efc2017-12-04 10:57:34 -06001676static void sev_vm_destroy(struct kvm *kvm)
1677{
Brijesh Singh59414c92017-12-04 10:57:35 -06001678 struct kvm_sev_info *sev = &kvm->arch.sev_info;
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001679 struct list_head *head = &sev->regions_list;
1680 struct list_head *pos, *q;
Brijesh Singh59414c92017-12-04 10:57:35 -06001681
Brijesh Singh1654efc2017-12-04 10:57:34 -06001682 if (!sev_guest(kvm))
1683 return;
1684
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001685 mutex_lock(&kvm->lock);
1686
1687 /*
 1688 * If userspace was terminated before unregistering the memory regions,
 1689 * then unpin all the registered memory.
1690 */
1691 if (!list_empty(head)) {
1692 list_for_each_safe(pos, q, head) {
1693 __unregister_enc_region_locked(kvm,
1694 list_entry(pos, struct enc_region, list));
1695 }
1696 }
1697
1698 mutex_unlock(&kvm->lock);
1699
Brijesh Singh59414c92017-12-04 10:57:35 -06001700 sev_unbind_asid(kvm, sev->handle);
Brijesh Singh1654efc2017-12-04 10:57:34 -06001701 sev_asid_free(kvm);
1702}
1703
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001704static void avic_vm_destroy(struct kvm *kvm)
1705{
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001706 unsigned long flags;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001707 struct kvm_arch *vm_data = &kvm->arch;
1708
Dmitry Vyukov3863dff2017-01-24 14:06:48 +01001709 if (!avic)
1710 return;
1711
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001712 if (vm_data->avic_logical_id_table_page)
1713 __free_page(vm_data->avic_logical_id_table_page);
1714 if (vm_data->avic_physical_id_table_page)
1715 __free_page(vm_data->avic_physical_id_table_page);
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001716
1717 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1718 hash_del(&vm_data->hnode);
1719 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001720}
1721
Brijesh Singh1654efc2017-12-04 10:57:34 -06001722static void svm_vm_destroy(struct kvm *kvm)
1723{
1724 avic_vm_destroy(kvm);
1725 sev_vm_destroy(kvm);
1726}
1727
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001728static int avic_vm_init(struct kvm *kvm)
1729{
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001730 unsigned long flags;
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001731 int err = -ENOMEM;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001732 struct kvm_arch *vm_data = &kvm->arch;
1733 struct page *p_page;
1734 struct page *l_page;
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001735 struct kvm_arch *ka;
1736 u32 vm_id;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001737
1738 if (!avic)
1739 return 0;
1740
1741 /* Allocating physical APIC ID table (4KB) */
1742 p_page = alloc_page(GFP_KERNEL);
1743 if (!p_page)
1744 goto free_avic;
1745
1746 vm_data->avic_physical_id_table_page = p_page;
1747 clear_page(page_address(p_page));
1748
1749 /* Allocating logical APIC ID table (4KB) */
1750 l_page = alloc_page(GFP_KERNEL);
1751 if (!l_page)
1752 goto free_avic;
1753
1754 vm_data->avic_logical_id_table_page = l_page;
1755 clear_page(page_address(l_page));
1756
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001757 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
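	/*
	 * Pick a non-zero vm_id (the counter is masked with AVIC_VM_ID_MASK);
	 * once it has wrapped at least once, scan the hash table to make
	 * sure the candidate is not still owned by a live VM.
	 */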
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001758 again:
1759 vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1760 if (vm_id == 0) { /* id is 1-based, zero is not okay */
1761 next_vm_id_wrapped = 1;
1762 goto again;
1763 }
1764 /* Is it still in use? Only possible if wrapped at least once */
1765 if (next_vm_id_wrapped) {
1766 hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
1767 struct kvm *k2 = container_of(ka, struct kvm, arch);
1768 struct kvm_arch *vd2 = &k2->arch;
1769 if (vd2->avic_vm_id == vm_id)
1770 goto again;
1771 }
1772 }
1773 vm_data->avic_vm_id = vm_id;
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001774 hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
1775 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1776
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001777 return 0;
1778
1779free_avic:
1780 avic_vm_destroy(kvm);
1781 return err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001782}
1783
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001784static inline int
1785avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001786{
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001787 int ret = 0;
1788 unsigned long flags;
1789 struct amd_svm_iommu_ir *ir;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001790 struct vcpu_svm *svm = to_svm(vcpu);
1791
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001792 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1793 return 0;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001794
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001795 /*
1796 * Here, we go through the per-vcpu ir_list to update all existing
 1797 * interrupt remapping table entries targeting this vcpu.
1798 */
1799 spin_lock_irqsave(&svm->ir_list_lock, flags);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001800
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001801 if (list_empty(&svm->ir_list))
1802 goto out;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001803
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001804 list_for_each_entry(ir, &svm->ir_list, node) {
1805 ret = amd_iommu_update_ga(cpu, r, ir->data);
1806 if (ret)
1807 break;
1808 }
1809out:
1810 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1811 return ret;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001812}
1813
1814static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1815{
1816 u64 entry;
1817 /* ID = 0xff (broadcast), ID > 0xff (reserved) */
Suravee Suthikulpanit7d669f52016-06-15 17:23:45 -05001818 int h_physical_id = kvm_cpu_get_apicid(cpu);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001819 struct vcpu_svm *svm = to_svm(vcpu);
1820
1821 if (!kvm_vcpu_apicv_active(vcpu))
1822 return;
1823
1824 if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
1825 return;
1826
1827 entry = READ_ONCE(*(svm->avic_physical_id_cache));
1828 WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
1829
1830 entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
1831 entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
1832
1833 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1834 if (svm->avic_is_running)
1835 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1836
1837 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001838 avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
1839 svm->avic_is_running);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001840}
1841
1842static void avic_vcpu_put(struct kvm_vcpu *vcpu)
1843{
1844 u64 entry;
1845 struct vcpu_svm *svm = to_svm(vcpu);
1846
1847 if (!kvm_vcpu_apicv_active(vcpu))
1848 return;
1849
1850 entry = READ_ONCE(*(svm->avic_physical_id_cache));
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001851 if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
1852 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
1853
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001854 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1855 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001856}
1857
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001858/**
1859 * This function is called during VCPU halt/unhalt.
1860 */
1861static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
1862{
1863 struct vcpu_svm *svm = to_svm(vcpu);
1864
1865 svm->avic_is_running = is_run;
1866 if (is_run)
1867 avic_vcpu_load(vcpu, vcpu->cpu);
1868 else
1869 avic_vcpu_put(vcpu);
1870}
1871
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001872static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
Avi Kivity04d2cc72007-09-10 18:10:54 +03001873{
1874 struct vcpu_svm *svm = to_svm(vcpu);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001875 u32 dummy;
1876 u32 eax = 1;
Avi Kivity04d2cc72007-09-10 18:10:54 +03001877
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001878 if (!init_event) {
1879 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
1880 MSR_IA32_APICBASE_ENABLE;
1881 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
1882 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
1883 }
Paolo Bonzini56908912015-10-19 11:30:19 +02001884 init_vmcb(svm);
Avi Kivity70433382007-11-07 12:57:23 +02001885
Yu Zhange911eb32017-08-24 20:27:52 +08001886 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001887 kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001888
1889 if (kvm_vcpu_apicv_active(vcpu) && !init_event)
1890 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
Avi Kivity04d2cc72007-09-10 18:10:54 +03001891}
1892
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001893static int avic_init_vcpu(struct vcpu_svm *svm)
1894{
1895 int ret;
1896
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05001897 if (!kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001898 return 0;
1899
1900 ret = avic_init_backing_page(&svm->vcpu);
1901 if (ret)
1902 return ret;
1903
1904 INIT_LIST_HEAD(&svm->ir_list);
1905 spin_lock_init(&svm->ir_list_lock);
1906
1907 return ret;
1908}
1909
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001910static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001911{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001912 struct vcpu_svm *svm;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001913 struct page *page;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001914 struct page *msrpm_pages;
Alexander Grafb286d5d2008-11-25 20:17:05 +01001915 struct page *hsave_page;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001916 struct page *nested_msrpm_pages;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001917 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001918
Rusty Russellc16f8622007-07-30 21:12:19 +10001919 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001920 if (!svm) {
1921 err = -ENOMEM;
1922 goto out;
1923 }
1924
1925 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1926 if (err)
1927 goto free_svm;
1928
Joerg Roedelf65c2292008-02-13 18:58:46 +01001929 err = -ENOMEM;
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001930 page = alloc_page(GFP_KERNEL);
1931 if (!page)
1932 goto uninit;
1933
Joerg Roedelf65c2292008-02-13 18:58:46 +01001934 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1935 if (!msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001936 goto free_page1;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001937
1938 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1939 if (!nested_msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001940 goto free_page2;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001941
Alexander Grafb286d5d2008-11-25 20:17:05 +01001942 hsave_page = alloc_page(GFP_KERNEL);
1943 if (!hsave_page)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001944 goto free_page3;
1945
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001946 err = avic_init_vcpu(svm);
1947 if (err)
1948 goto free_page4;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001949
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001950 /* We initialize this flag to true to make sure that the is_running
 1951 * bit is set the first time the vcpu is loaded.
1952 */
1953 svm->avic_is_running = true;
1954
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001955 svm->nested.hsave = page_address(hsave_page);
Alexander Grafb286d5d2008-11-25 20:17:05 +01001956
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001957 svm->msrpm = page_address(msrpm_pages);
1958 svm_vcpu_init_msrpm(svm->msrpm);
1959
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001960 svm->nested.msrpm = page_address(nested_msrpm_pages);
Joerg Roedel323c3d82010-03-01 15:34:37 +01001961 svm_vcpu_init_msrpm(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01001962
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001963 svm->vmcb = page_address(page);
1964 clear_page(svm->vmcb);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001965 svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001966 svm->asid_generation = 0;
Paolo Bonzini56908912015-10-19 11:30:19 +02001967 init_vmcb(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001968
Boris Ostrovsky2b036c62012-01-09 14:00:35 -05001969 svm_init_osvw(&svm->vcpu);
1970
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001971 return &svm->vcpu;
Avi Kivity36241b82006-12-22 01:05:20 -08001972
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001973free_page4:
1974 __free_page(hsave_page);
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001975free_page3:
1976 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1977free_page2:
1978 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1979free_page1:
1980 __free_page(page);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001981uninit:
1982 kvm_vcpu_uninit(&svm->vcpu);
1983free_svm:
Rusty Russella4770342007-08-01 14:46:11 +10001984 kmem_cache_free(kvm_vcpu_cache, svm);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001985out:
1986 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001987}
1988
1989static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1990{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001991 struct vcpu_svm *svm = to_svm(vcpu);
1992
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001993 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +01001994 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001995 __free_page(virt_to_page(svm->nested.hsave));
1996 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001997 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10001998 kmem_cache_free(kvm_vcpu_cache, svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001999}
2000
Avi Kivity15ad7142007-07-11 18:17:21 +03002001static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002002{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002003 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03002004 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02002005
Avi Kivity0cc50642007-03-25 12:07:27 +02002006 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03002007 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002008 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02002009 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002010
Avi Kivity82ca2d12010-10-21 12:20:34 +02002011#ifdef CONFIG_X86_64
2012 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
2013#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02002014 savesegment(fs, svm->host.fs);
2015 savesegment(gs, svm->host.gs);
2016 svm->host.ldt = kvm_read_ldt();
2017
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002018 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002019 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01002020
Haozhong Zhangad721882015-10-20 15:39:02 +08002021 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
2022 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2023 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
2024 __this_cpu_write(current_tsc_ratio, tsc_ratio);
2025 wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
2026 }
Joerg Roedelfbc0db72011-03-25 09:44:46 +01002027 }
Paolo Bonzini46896c72015-11-12 14:49:16 +01002028 /* This assumes that the kernel never uses MSR_TSC_AUX */
2029 if (static_cpu_has(X86_FEATURE_RDTSCP))
2030 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002031
2032 avic_vcpu_load(vcpu, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002033}
2034
2035static void svm_vcpu_put(struct kvm_vcpu *vcpu)
2036{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002037 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002038 int i;
2039
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002040 avic_vcpu_put(vcpu);
2041
Avi Kivitye1beb1d2007-11-18 13:50:24 +02002042 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02002043 kvm_load_ldt(svm->host.ldt);
2044#ifdef CONFIG_X86_64
2045 loadsegment(fs, svm->host.fs);
Andy Lutomirski296f7812016-04-26 12:23:29 -07002046 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01002047 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02002048#else
Avi Kivity831ca602011-03-08 16:09:51 +02002049#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02002050 loadsegment(gs, svm->host.gs);
2051#endif
Avi Kivity831ca602011-03-08 16:09:51 +02002052#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002053 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002054 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002055}
2056
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002057static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2058{
2059 avic_set_running(vcpu, false);
2060}
2061
2062static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2063{
2064 avic_set_running(vcpu, true);
2065}
2066
Avi Kivity6aa8b732006-12-10 02:21:36 -08002067static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2068{
Ladi Prosek9b611742017-06-21 09:06:59 +02002069 struct vcpu_svm *svm = to_svm(vcpu);
2070 unsigned long rflags = svm->vmcb->save.rflags;
2071
2072 if (svm->nmi_singlestep) {
2073 /* Hide our flags if they were not set by the guest */
2074 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2075 rflags &= ~X86_EFLAGS_TF;
2076 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2077 rflags &= ~X86_EFLAGS_RF;
2078 }
2079 return rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002080}
2081
2082static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2083{
Ladi Prosek9b611742017-06-21 09:06:59 +02002084 if (to_svm(vcpu)->nmi_singlestep)
2085 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2086
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002087 /*
Andrea Gelminibb3541f2016-05-21 14:14:44 +02002088 * Any change of EFLAGS.VM is accompanied by a reload of SS
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002089 * (caused by either a task switch or an inter-privilege IRET),
2090 * so we do not need to update the CPL here.
2091 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002092 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002093}
2094
Avi Kivity6de4f3a2009-05-31 22:58:47 +03002095static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2096{
2097 switch (reg) {
2098 case VCPU_EXREG_PDPTR:
2099 BUG_ON(!npt_enabled);
Avi Kivity9f8fe502010-12-05 17:30:00 +02002100 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03002101 break;
2102 default:
2103 BUG();
2104 }
2105}
2106
Alexander Graff0b85052008-11-25 20:17:01 +01002107static void svm_set_vintr(struct vcpu_svm *svm)
2108{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002109 set_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01002110}
2111
2112static void svm_clear_vintr(struct vcpu_svm *svm)
2113{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002114 clr_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01002115}
2116
Avi Kivity6aa8b732006-12-10 02:21:36 -08002117static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2118{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002119 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002120
2121 switch (seg) {
2122 case VCPU_SREG_CS: return &save->cs;
2123 case VCPU_SREG_DS: return &save->ds;
2124 case VCPU_SREG_ES: return &save->es;
2125 case VCPU_SREG_FS: return &save->fs;
2126 case VCPU_SREG_GS: return &save->gs;
2127 case VCPU_SREG_SS: return &save->ss;
2128 case VCPU_SREG_TR: return &save->tr;
2129 case VCPU_SREG_LDTR: return &save->ldtr;
2130 }
2131 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00002132 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002133}
2134
2135static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2136{
2137 struct vmcb_seg *s = svm_seg(vcpu, seg);
2138
2139 return s->base;
2140}
2141
2142static void svm_get_segment(struct kvm_vcpu *vcpu,
2143 struct kvm_segment *var, int seg)
2144{
2145 struct vmcb_seg *s = svm_seg(vcpu, seg);
2146
2147 var->base = s->base;
2148 var->limit = s->limit;
2149 var->selector = s->selector;
2150 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2151 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2152 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2153 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2154 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2155 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2156 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
Jim Mattson80112c82014-07-08 09:47:41 +05302157
2158 /*
2159 * AMD CPUs circa 2014 track the G bit for all segments except CS.
2160 * However, the SVM spec states that the G bit is not observed by the
2161 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2162 * So let's synthesize a legal G bit for all segments, this helps
2163 * running KVM nested. It also helps cross-vendor migration, because
2164 * Intel's vmentry has a check on the 'G' bit.
2165 */
2166 var->g = s->limit > 0xfffff;
Amit Shah25022ac2008-10-27 09:04:17 +00002167
Joerg Roedele0231712010-02-24 18:59:10 +01002168 /*
2169 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02002170 * for cross-vendor migration purposes by deriving it from "not present".
2171 */
Gioh Kim8eae9572017-05-30 15:24:45 +02002172 var->unusable = !var->present;
Andre Przywara19bca6a2009-04-28 12:45:30 +02002173
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002174 switch (seg) {
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002175 case VCPU_SREG_TR:
2176 /*
2177 * Work around a bug where the busy flag in the tr selector
2178 * isn't exposed
2179 */
Amit Shahc0d09822008-10-27 09:04:18 +00002180 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002181 break;
2182 case VCPU_SREG_DS:
2183 case VCPU_SREG_ES:
2184 case VCPU_SREG_FS:
2185 case VCPU_SREG_GS:
2186 /*
 2187 * The accessed bit must always be set in the segment
 2188 * descriptor cache: even when it is cleared in the in-memory
 2189 * descriptor, the cached bit remains 1. Since
2190 * Intel has a check on this, set it here to support
2191 * cross-vendor migration.
2192 */
2193 if (!var->unusable)
2194 var->type |= 0x1;
2195 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02002196 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01002197 /*
2198 * On AMD CPUs sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02002199 * descriptor is left as 1, although the whole segment has
2200 * been made unusable. Clear it here to pass an Intel VMX
 2201 * entry check when cross-vendor migrating.
2202 */
2203 if (var->unusable)
2204 var->db = 0;
Roman Pend9c1b542017-06-01 10:55:03 +02002205 /* This is symmetric with svm_set_segment() */
Jan Kiszka33b458d2014-06-29 17:12:43 +02002206 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
Andre Przywarab586eb02009-04-28 12:45:43 +02002207 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002208 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08002209}
2210
Izik Eidus2e4d2652008-03-24 19:38:34 +02002211static int svm_get_cpl(struct kvm_vcpu *vcpu)
2212{
2213 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2214
2215 return save->cpl;
2216}
2217
Gleb Natapov89a27f42010-02-16 10:51:48 +02002218static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002219{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002220 struct vcpu_svm *svm = to_svm(vcpu);
2221
Gleb Natapov89a27f42010-02-16 10:51:48 +02002222 dt->size = svm->vmcb->save.idtr.limit;
2223 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002224}
2225
Gleb Natapov89a27f42010-02-16 10:51:48 +02002226static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002227{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002228 struct vcpu_svm *svm = to_svm(vcpu);
2229
Gleb Natapov89a27f42010-02-16 10:51:48 +02002230 svm->vmcb->save.idtr.limit = dt->size;
2231 svm->vmcb->save.idtr.base = dt->address ;
Joerg Roedel17a703c2010-12-03 11:45:56 +01002232 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002233}
2234
Gleb Natapov89a27f42010-02-16 10:51:48 +02002235static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002236{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002237 struct vcpu_svm *svm = to_svm(vcpu);
2238
Gleb Natapov89a27f42010-02-16 10:51:48 +02002239 dt->size = svm->vmcb->save.gdtr.limit;
2240 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002241}
2242
Gleb Natapov89a27f42010-02-16 10:51:48 +02002243static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002244{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002245 struct vcpu_svm *svm = to_svm(vcpu);
2246
Gleb Natapov89a27f42010-02-16 10:51:48 +02002247 svm->vmcb->save.gdtr.limit = dt->size;
 2248 svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01002249 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002250}
2251
Avi Kivitye8467fd2009-12-29 18:43:06 +02002252static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2253{
2254}
2255
Avi Kivityaff48ba2010-12-05 18:56:11 +02002256static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2257{
2258}
2259
Anthony Liguori25c4c272007-04-27 09:29:21 +03002260static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08002261{
2262}
2263
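/*
 * Intercept CR0 reads/writes only while the guest-visible CR0 differs
 * from the value in the VMCB (KVM may force PG/WP on without NPT);
 * once the two match, CR0 accesses can go straight to hardware.
 */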
Avi Kivityd2251572010-01-06 10:55:27 +02002264static void update_cr0_intercept(struct vcpu_svm *svm)
2265{
2266 ulong gcr0 = svm->vcpu.arch.cr0;
2267 u64 *hcr0 = &svm->vmcb->save.cr0;
2268
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002269 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2270 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
Avi Kivityd2251572010-01-06 10:55:27 +02002271
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002272 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02002273
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002274 if (gcr0 == *hcr0) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002275 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2276 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02002277 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002278 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2279 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02002280 }
2281}
2282
Avi Kivity6aa8b732006-12-10 02:21:36 -08002283static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2284{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002285 struct vcpu_svm *svm = to_svm(vcpu);
2286
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002287#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02002288 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10002289 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02002290 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06002291 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002292 }
2293
Mike Dayd77c26f2007-10-08 09:02:08 -04002294 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02002295 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06002296 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002297 }
2298 }
2299#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002300 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02002301
2302 if (!npt_enabled)
2303 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02002304
Paolo Bonzinibcf166a2015-10-01 13:19:55 +02002305 /*
 2306 * Re-enable caching here because the QEMU BIOS
 2307 * does not do it - otherwise there is some delay at
 2308 * reboot.
2309 */
2310 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2311 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002312 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002313 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02002314 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002315}
2316
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002317static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002318{
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07002319 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02002320 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2321
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002322 if (cr4 & X86_CR4_VMXE)
2323 return 1;
2324
Joerg Roedele5eab0c2008-09-09 19:11:51 +02002325 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002326 svm_flush_tlb(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02002327
Joerg Roedelec077262008-04-09 14:15:28 +02002328 vcpu->arch.cr4 = cr4;
2329 if (!npt_enabled)
2330 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02002331 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02002332 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002333 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002334 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002335}
2336
2337static void svm_set_segment(struct kvm_vcpu *vcpu,
2338 struct kvm_segment *var, int seg)
2339{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002340 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002341 struct vmcb_seg *s = svm_seg(vcpu, seg);
2342
2343 s->base = var->base;
2344 s->limit = var->limit;
2345 s->selector = var->selector;
Roman Pend9c1b542017-06-01 10:55:03 +02002346 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2347 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2348 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2349 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2350 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2351 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2352 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2353 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002354
2355 /*
2356 * This is always accurate, except if SYSRET returned to a segment
2357 * with SS.DPL != 3. Intel does not have this quirk, and always
2358 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2359 * would entail passing the CPL to userspace and back.
2360 */
2361 if (seg == VCPU_SREG_SS)
Roman Pend9c1b542017-06-01 10:55:03 +02002362 /* This is symmetric with svm_get_segment() */
2363 svm->vmcb->save.cpl = (var->dpl & 3);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002364
Joerg Roedel060d0c92010-12-03 11:45:57 +01002365 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002366}
2367
Paolo Bonzinicbdb9672015-11-10 09:14:39 +01002368static void update_bp_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002369{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002370 struct vcpu_svm *svm = to_svm(vcpu);
2371
Joerg Roedel18c918c2010-11-30 18:03:59 +01002372 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03002373
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002374 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002375 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01002376 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002377 } else
2378 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03002379}
2380
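/*
 * Hand out the next ASID on this CPU. When the per-CPU pool runs out,
 * start a new generation and request a full ASID flush so stale TLB
 * entries from the previous generation cannot survive.
 */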
Tejun Heo0fe1e002009-10-29 22:34:14 +09002381static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002382{
Tejun Heo0fe1e002009-10-29 22:34:14 +09002383 if (sd->next_asid > sd->max_asid) {
2384 ++sd->asid_generation;
Brijesh Singh4faefff2017-12-04 10:57:25 -06002385 sd->next_asid = sd->min_asid;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002386 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002387 }
2388
Tejun Heo0fe1e002009-10-29 22:34:14 +09002389 svm->asid_generation = sd->asid_generation;
2390 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01002391
2392 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002393}
2394
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01002395static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2396{
2397 return to_svm(vcpu)->vmcb->save.dr6;
2398}
2399
2400static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2401{
2402 struct vcpu_svm *svm = to_svm(vcpu);
2403
2404 svm->vmcb->save.dr6 = value;
2405 mark_dirty(svm->vmcb, VMCB_DR);
2406}
2407
Paolo Bonzinifacb0132014-02-21 10:32:27 +01002408static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2409{
2410 struct vcpu_svm *svm = to_svm(vcpu);
2411
2412 get_debugreg(vcpu->arch.db[0], 0);
2413 get_debugreg(vcpu->arch.db[1], 1);
2414 get_debugreg(vcpu->arch.db[2], 2);
2415 get_debugreg(vcpu->arch.db[3], 3);
2416 vcpu->arch.dr6 = svm_get_dr6(vcpu);
2417 vcpu->arch.dr7 = svm->vmcb->save.dr7;
2418
2419 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2420 set_dr_intercepts(svm);
2421}
2422
Gleb Natapov020df072010-04-13 10:05:23 +03002423static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002424{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01002425 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01002426
Gleb Natapov020df072010-04-13 10:05:23 +03002427 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01002428 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002429}
2430
Avi Kivity851ba692009-08-24 11:10:17 +03002431static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002432{
Brijesh Singh0ede79e2017-12-04 10:57:39 -06002433 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002434 u64 error_code = svm->vmcb->control.exit_info_1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002435
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002436 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
Andre Przywaradc25e892010-12-21 11:12:07 +01002437 svm->vmcb->control.insn_bytes,
Paolo Bonzinid0006532017-08-11 18:36:43 +02002438 svm->vmcb->control.insn_len);
2439}
2440
2441static int npf_interception(struct vcpu_svm *svm)
2442{
Brijesh Singh0ede79e2017-12-04 10:57:39 -06002443 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
Paolo Bonzinid0006532017-08-11 18:36:43 +02002444 u64 error_code = svm->vmcb->control.exit_info_1;
2445
2446 trace_kvm_page_fault(fault_address, error_code);
2447 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
2448 svm->vmcb->control.insn_bytes,
2449 svm->vmcb->control.insn_len);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002450}
2451
Avi Kivity851ba692009-08-24 11:10:17 +03002452static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002453{
Avi Kivity851ba692009-08-24 11:10:17 +03002454 struct kvm_run *kvm_run = svm->vcpu.run;
2455
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002456 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03002457 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02002458 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002459 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2460 return 1;
2461 }
Gleb Natapov44c11432009-05-11 13:35:52 +03002462
Jan Kiszka6be7d302009-10-18 13:24:54 +02002463 if (svm->nmi_singlestep) {
Ladi Prosek4aebd0e2017-06-21 09:06:57 +02002464 disable_nmi_singlestep(svm);
Gleb Natapov44c11432009-05-11 13:35:52 +03002465 }
2466
2467 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01002468 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03002469 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2470 kvm_run->debug.arch.pc =
2471 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2472 kvm_run->debug.arch.exception = DB_VECTOR;
2473 return 0;
2474 }
2475
2476 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002477}
2478
Avi Kivity851ba692009-08-24 11:10:17 +03002479static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002480{
Avi Kivity851ba692009-08-24 11:10:17 +03002481 struct kvm_run *kvm_run = svm->vcpu.run;
2482
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002483 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2484 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2485 kvm_run->debug.arch.exception = BP_VECTOR;
2486 return 0;
2487}
2488
Avi Kivity851ba692009-08-24 11:10:17 +03002489static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002490{
2491 int er;
2492
Andre Przywara51d8b662010-12-21 11:12:02 +01002493 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002494 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02002495 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002496 return 1;
2497}
2498
Eric Northup54a20552015-11-03 18:03:53 +01002499static int ac_interception(struct vcpu_svm *svm)
2500{
2501 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2502 return 1;
2503}
2504
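/*
 * Check for the machine-check signature of AMD erratum 383; on a match
 * the MCi_STATUS registers and the MCIP bit in MCG_STATUS are cleared
 * and the TLB is flushed so the host can survive the event.
 */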
Joerg Roedel67ec6602010-05-17 14:43:35 +02002505static bool is_erratum_383(void)
2506{
2507 int err, i;
2508 u64 value;
2509
2510 if (!erratum_383_found)
2511 return false;
2512
2513 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2514 if (err)
2515 return false;
2516
2517 /* Bit 62 may or may not be set for this mce */
2518 value &= ~(1ULL << 62);
2519
2520 if (value != 0xb600000000010015ULL)
2521 return false;
2522
2523 /* Clear MCi_STATUS registers */
2524 for (i = 0; i < 6; ++i)
2525 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2526
2527 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2528 if (!err) {
2529 u32 low, high;
2530
2531 value &= ~(1ULL << 2);
2532 low = lower_32_bits(value);
2533 high = upper_32_bits(value);
2534
2535 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2536 }
2537
2538 /* Flush tlb to evict multi-match entries */
2539 __flush_tlb_all();
2540
2541 return true;
2542}
2543
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002544static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02002545{
Joerg Roedel67ec6602010-05-17 14:43:35 +02002546 if (is_erratum_383()) {
2547 /*
2548 * Erratum 383 triggered. Guest state is corrupt so kill the
2549 * guest.
2550 */
2551 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2552
Avi Kivitya8eeb042010-05-10 12:34:53 +03002553 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02002554
2555 return;
2556 }
2557
Joerg Roedel53371b52008-04-09 14:15:30 +02002558 /*
2559 * On an #MC intercept the MCE handler is not called automatically in
2560 * the host. So do it by hand here.
2561 */
2562 asm volatile (
2563 "int $0x12\n");
2564 /* not sure if we ever come back to this point */
2565
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002566 return;
2567}
2568
2569static int mc_interception(struct vcpu_svm *svm)
2570{
Joerg Roedel53371b52008-04-09 14:15:30 +02002571 return 1;
2572}
2573
Avi Kivity851ba692009-08-24 11:10:17 +03002574static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002575{
Avi Kivity851ba692009-08-24 11:10:17 +03002576 struct kvm_run *kvm_run = svm->vcpu.run;
2577
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002578 /*
2579 * VMCB is undefined after a SHUTDOWN intercept
2580 * so reinitialize it.
2581 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002582 clear_page(svm->vmcb);
Paolo Bonzini56908912015-10-19 11:30:19 +02002583 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002584
2585 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2586 return 0;
2587}
2588
Avi Kivity851ba692009-08-24 11:10:17 +03002589static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002590{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002591 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04002592 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002593 int size, in, string, ret;
Avi Kivity039576c2007-03-20 12:46:50 +02002594 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002595
Rusty Russelle756fc62007-07-30 20:07:08 +10002596 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03002597 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02002598 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Tom Lendacky8370c3d2016-11-23 12:01:50 -05002599 if (string)
Andre Przywara51d8b662010-12-21 11:12:02 +01002600 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002601
Avi Kivity039576c2007-03-20 12:46:50 +02002602 port = io_info >> 16;
2603 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002604 svm->next_rip = svm->vmcb->control.exit_info_2;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002605 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002606
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002607 /*
2608 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
2609 * KVM_EXIT_DEBUG here.
2610 */
2611 if (in)
2612 return kvm_fast_pio_in(vcpu, size, port) && ret;
2613 else
2614 return kvm_fast_pio_out(vcpu, size, port) && ret;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002615}
2616
Avi Kivity851ba692009-08-24 11:10:17 +03002617static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02002618{
2619 return 1;
2620}
2621
Avi Kivity851ba692009-08-24 11:10:17 +03002622static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02002623{
2624 ++svm->vcpu.stat.irq_exits;
2625 return 1;
2626}
2627
Avi Kivity851ba692009-08-24 11:10:17 +03002628static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002629{
2630 return 1;
2631}
2632
Avi Kivity851ba692009-08-24 11:10:17 +03002633static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002634{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002635 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10002636 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002637}
2638
Avi Kivity851ba692009-08-24 11:10:17 +03002639static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02002640{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002641 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Andrey Smetanin0d9c0552016-02-11 16:44:59 +03002642 return kvm_emulate_hypercall(&svm->vcpu);
Avi Kivity02e235b2007-02-19 14:37:47 +02002643}
2644
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002645static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2646{
2647 struct vcpu_svm *svm = to_svm(vcpu);
2648
2649 return svm->nested.nested_cr3;
2650}
2651
Avi Kivitye4e517b2011-07-28 11:36:17 +03002652static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2653{
2654 struct vcpu_svm *svm = to_svm(vcpu);
2655 u64 cr3 = svm->nested.nested_cr3;
2656 u64 pdpte;
2657 int ret;
2658
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002659 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002660 offset_in_page(cr3) + index * 8, 8);
Avi Kivitye4e517b2011-07-28 11:36:17 +03002661 if (ret)
2662 return 0;
2663 return pdpte;
2664}
2665
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002666static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2667 unsigned long root)
2668{
2669 struct vcpu_svm *svm = to_svm(vcpu);
2670
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002671 svm->vmcb->control.nested_cr3 = __sme_set(root);
Joerg Roedelb2747162010-12-03 11:45:53 +01002672 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002673 svm_flush_tlb(vcpu);
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002674}
2675
Avi Kivity6389ee92010-11-29 16:12:30 +02002676static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2677 struct x86_exception *fault)
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002678{
2679 struct vcpu_svm *svm = to_svm(vcpu);
2680
Paolo Bonzini5e352512014-09-02 13:18:37 +02002681 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2682 /*
2683 * TODO: track the cause of the nested page fault, and
2684 * correctly fill in the high bits of exit_info_1.
2685 */
2686 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2687 svm->vmcb->control.exit_code_hi = 0;
2688 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2689 svm->vmcb->control.exit_info_2 = fault->address;
2690 }
2691
2692 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2693 svm->vmcb->control.exit_info_1 |= fault->error_code;
2694
2695 /*
2696 * The present bit is always zero for page structure faults on real
2697 * hardware.
2698 */
2699 if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2700 svm->vmcb->control.exit_info_1 &= ~1;
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002701
2702 nested_svm_vmexit(svm);
2703}
2704
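/*
 * Hypothetical sketch, not driver code, of how the handler above builds
 * exit_info_1 for the nested #NPF: the low 32 bits carry the page-fault
 * error code and the high bits describe the cause (bit 32 is used as a
 * placeholder until the real cause is tracked; bit 33 marks a fault on
 * the guest page tables themselves, for which Present must read 0):
 */
static inline u64 demo_npf_exit_info_1(u64 old, u32 error_code)
{
	u64 info = old & ~0xffffffffULL;	/* keep the cause bits 63:32 */

	info |= error_code;			/* low 32 bits: PF error code */
	if (info & (2ULL << 32))		/* page-structure fault ... */
		info &= ~1ULL;			/* ... never reports Present */
	return info;
}
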
Paolo Bonzini8a3c1a332013-10-02 16:56:13 +02002705static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
Joerg Roedel4b161842010-09-10 17:31:03 +02002706{
Paolo Bonziniad896af2013-10-02 16:56:14 +02002707 WARN_ON(mmu_is_nested(vcpu));
2708 kvm_init_shadow_mmu(vcpu);
Joerg Roedel4b161842010-09-10 17:31:03 +02002709 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
2710 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
Avi Kivitye4e517b2011-07-28 11:36:17 +03002711 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
Joerg Roedel4b161842010-09-10 17:31:03 +02002712 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
Yu Zhang855feb62017-08-24 20:27:55 +08002713 vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
Xiao Guangrongc258b622015-08-05 12:04:24 +08002714 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
Joerg Roedel4b161842010-09-10 17:31:03 +02002715 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
Joerg Roedel4b161842010-09-10 17:31:03 +02002716}
2717
2718static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2719{
2720 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
2721}
2722
Alexander Grafc0725422008-11-25 20:17:03 +01002723static int nested_svm_check_permissions(struct vcpu_svm *svm)
2724{
Dan Carpentere9196ce2017-05-18 10:39:53 +03002725 if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2726 !is_paging(&svm->vcpu)) {
Alexander Grafc0725422008-11-25 20:17:03 +01002727 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2728 return 1;
2729 }
2730
2731 if (svm->vmcb->save.cpl) {
2732 kvm_inject_gp(&svm->vcpu, 0);
2733 return 1;
2734 }
2735
Dan Carpentere9196ce2017-05-18 10:39:53 +03002736 return 0;
Alexander Grafc0725422008-11-25 20:17:03 +01002737}
2738
Alexander Grafcf74a782008-11-25 20:17:08 +01002739static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2740 bool has_error_code, u32 error_code)
2741{
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002742 int vmexit;
2743
Joerg Roedel20307532010-11-29 17:51:48 +01002744 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel0295ad72009-08-07 11:49:37 +02002745 return 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002746
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002747 vmexit = nested_svm_intercept(svm);
2748 if (vmexit != NESTED_EXIT_DONE)
2749 return 0;
2750
Joerg Roedel0295ad72009-08-07 11:49:37 +02002751 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2752 svm->vmcb->control.exit_code_hi = 0;
2753 svm->vmcb->control.exit_info_1 = error_code;
Paolo Bonzinib96fb432017-07-27 12:29:32 +02002754
2755 /*
2756 * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
2757 * The fix is to add the ancillary datum (CR2 or DR6) to structs
2758 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
2759 * written only when inject_pending_event runs (DR6 would be written here
2760 * too). This should be conditional on a new capability---if the
2761 * capability is disabled, kvm_multiple_exception would write the
2762 * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
2763 */
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002764 if (svm->vcpu.arch.exception.nested_apf)
2765 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
2766 else
2767 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
Joerg Roedel0295ad72009-08-07 11:49:37 +02002768
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002769 svm->nested.exit_required = true;
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002770 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002771}
2772
Joerg Roedel8fe54652010-02-19 16:23:01 +01002773/* This function returns true if it is safe to enable the irq window */
2774static inline bool nested_svm_intr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002775{
Joerg Roedel20307532010-11-29 17:51:48 +01002776 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002777 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002778
Joerg Roedel26666952009-08-07 11:49:46 +02002779 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002780 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002781
Joerg Roedel26666952009-08-07 11:49:46 +02002782 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002783 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002784
Gleb Natapova0a07cd2010-09-20 10:15:32 +02002785 /*
2786 * if vmexit was already requested (by intercepted exception
2787 * for instance) do not overwrite it with "external interrupt"
2788 * vmexit.
2789 */
2790 if (svm->nested.exit_required)
2791 return false;
2792
Joerg Roedel197717d2010-02-24 18:59:19 +01002793 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2794 svm->vmcb->control.exit_info_1 = 0;
2795 svm->vmcb->control.exit_info_2 = 0;
Joerg Roedel26666952009-08-07 11:49:46 +02002796
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002797 if (svm->nested.intercept & 1ULL) {
2798 /*
2799 * The #vmexit can't be emulated here directly because this
Guo Chaoc5ec2e52012-06-28 15:16:43 +08002800 * code path runs with irqs and preemption disabled. A
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002801 * #vmexit emulation might sleep. Only signal the request for
2802 * the #vmexit here.
2803 */
2804 svm->nested.exit_required = true;
Joerg Roedel236649d2009-10-09 16:08:30 +02002805 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
Joerg Roedel8fe54652010-02-19 16:23:01 +01002806 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002807 }
2808
Joerg Roedel8fe54652010-02-19 16:23:01 +01002809 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002810}
2811
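/*
 * Condensed restatement (illustrative only, side effects omitted) of the
 * decision above: with V_INTR_MASKING active the window is open only if
 * L1 left interrupts enabled (HF_HIF), no #vmexit is already queued, and
 * L1 does not intercept INTR (INTERCEPT_INTR is bit 0 of the cached
 * intercept word).
 */
static inline bool demo_irq_window_open(unsigned long hflags,
					u64 l1_intercept, bool exit_pending)
{
	if (!(hflags & HF_VINTR_MASK))
		return true;		/* L1 does not virtualize masking */
	if (!(hflags & HF_HIF_MASK))
		return false;		/* L1 runs with interrupts masked */
	if (exit_pending)
		return false;		/* don't clobber a queued #vmexit */
	return !(l1_intercept & 1ULL);	/* INTR intercept -> #vmexit first */
}
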
Joerg Roedel887f5002010-02-24 18:59:12 +01002812/* This function returns true if it is safe to enable the nmi window */
2813static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2814{
Joerg Roedel20307532010-11-29 17:51:48 +01002815 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel887f5002010-02-24 18:59:12 +01002816 return true;
2817
2818 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2819 return true;
2820
2821 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2822 svm->nested.exit_required = true;
2823
2824 return false;
2825}
2826
Joerg Roedel7597f122010-02-19 16:23:00 +01002827static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002828{
2829 struct page *page;
2830
Joerg Roedel6c3bd3d2010-02-19 16:23:04 +01002831 might_sleep();
2832
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002833 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002834 if (is_error_page(page))
2835 goto error;
2836
Joerg Roedel7597f122010-02-19 16:23:00 +01002837 *_page = page;
2838
2839 return kmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002840
2841error:
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002842 kvm_inject_gp(&svm->vcpu, 0);
2843
2844 return NULL;
2845}
2846
Joerg Roedel7597f122010-02-19 16:23:00 +01002847static void nested_svm_unmap(struct page *page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002848{
Joerg Roedel7597f122010-02-19 16:23:00 +01002849 kunmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002850 kvm_release_page_dirty(page);
2851}
2852
Joerg Roedelce2ac082010-03-01 15:34:39 +01002853static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002854{
Jan Kiszka9bf41832014-06-30 10:54:17 +02002855 unsigned port, size, iopm_len;
2856 u16 val, mask;
2857 u8 start_bit;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002858 u64 gpa;
2859
2860 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2861 return NESTED_EXIT_HOST;
2862
2863 port = svm->vmcb->control.exit_info_1 >> 16;
Jan Kiszka9bf41832014-06-30 10:54:17 +02002864 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
2865 SVM_IOIO_SIZE_SHIFT;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002866 gpa = svm->nested.vmcb_iopm + (port / 8);
Jan Kiszka9bf41832014-06-30 10:54:17 +02002867 start_bit = port % 8;
2868 iopm_len = (start_bit + size > 8) ? 2 : 1;
2869 mask = (0xf >> (4 - size)) << start_bit;
2870 val = 0;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002871
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002872 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
Jan Kiszka9bf41832014-06-30 10:54:17 +02002873 return NESTED_EXIT_DONE;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002874
Jan Kiszka9bf41832014-06-30 10:54:17 +02002875 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002876}
2877
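/*
 * Worked example (illustrative only, not driver code): for an exit on
 * port 0x3F9 with size 2 the lookup above computes
 *	gpa       = vmcb_iopm + 0x3F9 / 8  = vmcb_iopm + 0x7F
 *	start_bit = 0x3F9 % 8              = 1
 *	iopm_len  = (1 + 2 > 8) ? 2 : 1    = 1 byte to read
 *	mask      = (0xf >> (4 - 2)) << 1  = 0b0110
 * so the access goes to L1 iff bit 1 or bit 2 of that IOPM byte is set.
 * The mask computation in isolation, as a hypothetical helper:
 */
static inline u16 demo_iopm_mask(unsigned int port, unsigned int size)
{
	u8 start_bit = port % 8;	/* bit offset inside the first byte */

	return (u16)((0xf >> (4 - size)) << start_bit);
}
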
Joerg Roedeld2477822010-03-01 15:34:34 +01002878static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002879{
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002880 u32 offset, msr, value;
2881 int write, mask;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002882
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002883 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
Joerg Roedeld2477822010-03-01 15:34:34 +01002884 return NESTED_EXIT_HOST;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002885
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002886 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2887 offset = svm_msrpm_offset(msr);
2888 write = svm->vmcb->control.exit_info_1 & 1;
2889 mask = 1 << ((2 * (msr & 0xf)) + write);
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002890
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002891 if (offset == MSR_INVALID)
2892 return NESTED_EXIT_DONE;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002893
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002894 /* Offset is in 32 bit units but we need it in 8 bit units */
2895 offset *= 4;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002896
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002897 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002898 return NESTED_EXIT_DONE;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002899
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002900 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002901}
2902
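/*
 * Illustrative sketch of the MSRPM lookup above (not driver code,
 * assuming svm_msrpm_offset() semantics): every MSR owns two adjacent
 * bits, read then write, packing 16 MSRs per u32.  A write to
 * MSR 0xC0000081 (msr & 0xf == 1) therefore tests bit 2 * 1 + 1 = 3 of
 * the u32 at byte offset svm_msrpm_offset(msr) * 4 in L1's bitmap.
 */
static inline u32 demo_msrpm_bit(u32 msr, int write)
{
	return 1U << ((2 * (msr & 0xf)) + write);  /* bit inside the u32 */
}
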
Ladi Prosekab2f4d732017-06-21 09:06:58 +02002903/* DB exceptions for our internal use must not cause a vmexit */
2904static int nested_svm_intercept_db(struct vcpu_svm *svm)
2905{
2906 unsigned long dr6;
2907
2908 /* if we're not singlestepping, it's not ours */
2909 if (!svm->nmi_singlestep)
2910 return NESTED_EXIT_DONE;
2911
2912 /* if it's not a singlestep exception, it's not ours */
2913 if (kvm_get_dr(&svm->vcpu, 6, &dr6))
2914 return NESTED_EXIT_DONE;
2915 if (!(dr6 & DR6_BS))
2916 return NESTED_EXIT_DONE;
2917
2918 /* if the guest is singlestepping, it should get the vmexit */
2919 if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
2920 disable_nmi_singlestep(svm);
2921 return NESTED_EXIT_DONE;
2922 }
2923
2924 /* it's ours, the nested hypervisor must not see this one */
2925 return NESTED_EXIT_HOST;
2926}
2927
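/*
 * Condensed restatement (illustrative, not driver code) of the #DB
 * routing above: the exit stays in KVM only when it came from KVM's own
 * NMI single-step and the L2 guest was not single-stepping itself.
 */
static inline bool demo_db_is_ours(bool nmi_singlestep, unsigned long dr6,
				   unsigned long guest_rflags)
{
	return nmi_singlestep &&
	       (dr6 & (1UL << 14)) &&		/* DR6.BS: single step */
	       !(guest_rflags & X86_EFLAGS_TF);	/* guest not stepping */
}
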
Joerg Roedel410e4d52009-08-07 11:49:44 +02002928static int nested_svm_exit_special(struct vcpu_svm *svm)
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002929{
Alexander Grafcf74a782008-11-25 20:17:08 +01002930 u32 exit_code = svm->vmcb->control.exit_code;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002931
Joerg Roedel410e4d52009-08-07 11:49:44 +02002932 switch (exit_code) {
2933 case SVM_EXIT_INTR:
2934 case SVM_EXIT_NMI:
Joerg Roedelff47a492010-04-22 12:33:14 +02002935 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
Joerg Roedel410e4d52009-08-07 11:49:44 +02002936 return NESTED_EXIT_HOST;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002937 case SVM_EXIT_NPF:
Joerg Roedele0231712010-02-24 18:59:10 +01002938 /* For now we always handle NPFs when NPT is in use */
Joerg Roedel410e4d52009-08-07 11:49:44 +02002939 if (npt_enabled)
2940 return NESTED_EXIT_HOST;
2941 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002942 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
Gleb Natapov631bc482010-10-14 11:22:52 +02002943 /* When we're shadowing, trap PFs, but not async PF */
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002944 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002945 return NESTED_EXIT_HOST;
2946 break;
2947 default:
2948 break;
Alexander Grafcf74a782008-11-25 20:17:08 +01002949 }
2950
Joerg Roedel410e4d52009-08-07 11:49:44 +02002951 return NESTED_EXIT_CONTINUE;
2952}
2953
2954/*
2955 * If this function returns NESTED_EXIT_DONE, this #vmexit was already handled
2956 */
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002957static int nested_svm_intercept(struct vcpu_svm *svm)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002958{
2959 u32 exit_code = svm->vmcb->control.exit_code;
2960 int vmexit = NESTED_EXIT_HOST;
2961
Alexander Grafcf74a782008-11-25 20:17:08 +01002962 switch (exit_code) {
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002963 case SVM_EXIT_MSR:
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002964 vmexit = nested_svm_exit_handled_msr(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002965 break;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002966 case SVM_EXIT_IOIO:
2967 vmexit = nested_svm_intercept_ioio(svm);
2968 break;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002969 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2970 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2971 if (svm->nested.intercept_cr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002972 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002973 break;
2974 }
Joerg Roedel3aed0412010-11-30 18:03:58 +01002975 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2976 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2977 if (svm->nested.intercept_dr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002978 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002979 break;
2980 }
2981 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2982 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
Ladi Prosekab2f4d732017-06-21 09:06:58 +02002983 if (svm->nested.intercept_exceptions & excp_bits) {
2984 if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
2985 vmexit = nested_svm_intercept_db(svm);
2986 else
2987 vmexit = NESTED_EXIT_DONE;
2988 }
Gleb Natapov631bc482010-10-14 11:22:52 +02002989 /* an async page fault always causes a vmexit */
2990 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002991 svm->vcpu.arch.exception.nested_apf != 0)
Gleb Natapov631bc482010-10-14 11:22:52 +02002992 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002993 break;
2994 }
Joerg Roedel228070b2010-04-22 12:33:10 +02002995 case SVM_EXIT_ERR: {
2996 vmexit = NESTED_EXIT_DONE;
2997 break;
2998 }
Alexander Grafcf74a782008-11-25 20:17:08 +01002999 default: {
3000 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
Joerg Roedelaad42c62009-08-07 11:49:34 +02003001 if (svm->nested.intercept & exit_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02003002 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01003003 }
3004 }
3005
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01003006 return vmexit;
3007}
3008
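/*
 * Illustrative sketch, not driver code: the default case above relies on
 * the SVM exit codes from SVM_EXIT_INTR upward lining up with the bit
 * positions of vmcb->control.intercept, so e.g. SVM_EXIT_VMRUN maps to
 * bit (SVM_EXIT_VMRUN - SVM_EXIT_INTR) of the cached intercept word.
 */
static inline bool demo_l1_wants_exit(u64 l1_intercept, u32 exit_code)
{
	return l1_intercept & (1ULL << (exit_code - SVM_EXIT_INTR));
}
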
3009static int nested_svm_exit_handled(struct vcpu_svm *svm)
3010{
3011 int vmexit;
3012
3013 vmexit = nested_svm_intercept(svm);
3014
3015 if (vmexit == NESTED_EXIT_DONE)
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02003016 nested_svm_vmexit(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02003017
3018 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01003019}
3020
Joerg Roedel0460a972009-08-07 11:49:31 +02003021static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
3022{
3023 struct vmcb_control_area *dst = &dst_vmcb->control;
3024 struct vmcb_control_area *from = &from_vmcb->control;
3025
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003026 dst->intercept_cr = from->intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01003027 dst->intercept_dr = from->intercept_dr;
Joerg Roedel0460a972009-08-07 11:49:31 +02003028 dst->intercept_exceptions = from->intercept_exceptions;
3029 dst->intercept = from->intercept;
3030 dst->iopm_base_pa = from->iopm_base_pa;
3031 dst->msrpm_base_pa = from->msrpm_base_pa;
3032 dst->tsc_offset = from->tsc_offset;
3033 dst->asid = from->asid;
3034 dst->tlb_ctl = from->tlb_ctl;
3035 dst->int_ctl = from->int_ctl;
3036 dst->int_vector = from->int_vector;
3037 dst->int_state = from->int_state;
3038 dst->exit_code = from->exit_code;
3039 dst->exit_code_hi = from->exit_code_hi;
3040 dst->exit_info_1 = from->exit_info_1;
3041 dst->exit_info_2 = from->exit_info_2;
3042 dst->exit_int_info = from->exit_int_info;
3043 dst->exit_int_info_err = from->exit_int_info_err;
3044 dst->nested_ctl = from->nested_ctl;
3045 dst->event_inj = from->event_inj;
3046 dst->event_inj_err = from->event_inj_err;
3047 dst->nested_cr3 = from->nested_cr3;
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003048 dst->virt_ext = from->virt_ext;
Joerg Roedel0460a972009-08-07 11:49:31 +02003049}
3050
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003051static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01003052{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003053 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02003054 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02003055 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003056 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01003057
Joerg Roedel17897f32009-10-09 16:08:29 +02003058 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3059 vmcb->control.exit_info_1,
3060 vmcb->control.exit_info_2,
3061 vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01003062 vmcb->control.exit_int_info_err,
3063 KVM_ISA_SVM);
Joerg Roedel17897f32009-10-09 16:08:29 +02003064
Joerg Roedel7597f122010-02-19 16:23:00 +01003065 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003066 if (!nested_vmcb)
3067 return 1;
3068
Joerg Roedel20307532010-11-29 17:51:48 +01003069 /* Exit Guest-Mode */
3070 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01003071 svm->nested.vmcb = 0;
3072
Alexander Grafcf74a782008-11-25 20:17:08 +01003073 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02003074 disable_gif(svm);
3075
3076 nested_vmcb->save.es = vmcb->save.es;
3077 nested_vmcb->save.cs = vmcb->save.cs;
3078 nested_vmcb->save.ss = vmcb->save.ss;
3079 nested_vmcb->save.ds = vmcb->save.ds;
3080 nested_vmcb->save.gdtr = vmcb->save.gdtr;
3081 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02003082 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01003083 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Avi Kivity9f8fe502010-12-05 17:30:00 +02003084 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02003085 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01003086 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03003087 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02003088 nested_vmcb->save.rip = vmcb->save.rip;
3089 nested_vmcb->save.rsp = vmcb->save.rsp;
3090 nested_vmcb->save.rax = vmcb->save.rax;
3091 nested_vmcb->save.dr7 = vmcb->save.dr7;
3092 nested_vmcb->save.dr6 = vmcb->save.dr6;
3093 nested_vmcb->save.cpl = vmcb->save.cpl;
3094
3095 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
3096 nested_vmcb->control.int_vector = vmcb->control.int_vector;
3097 nested_vmcb->control.int_state = vmcb->control.int_state;
3098 nested_vmcb->control.exit_code = vmcb->control.exit_code;
3099 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
3100 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
3101 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
3102 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
3103 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel6092d3d2015-10-14 15:10:54 +02003104
3105 if (svm->nrips_enabled)
3106 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02003107
3108 /*
3109 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3110 * to make sure that we do not lose injected events. So check event_inj
3111 * here and copy it to exit_int_info if it is valid.
3112 * Exit_int_info and event_inj can't both be valid because the case
3113 * below only happens on a VMRUN instruction intercept which has
3114 * no valid exit_int_info set.
3115 */
3116 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3117 struct vmcb_control_area *nc = &nested_vmcb->control;
3118
3119 nc->exit_int_info = vmcb->control.event_inj;
3120 nc->exit_int_info_err = vmcb->control.event_inj_err;
3121 }
3122
Joerg Roedel33740e42009-08-07 11:49:29 +02003123 nested_vmcb->control.tlb_ctl = 0;
3124 nested_vmcb->control.event_inj = 0;
3125 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01003126
3127 /* We always set V_INTR_MASKING and remember the old value in hflags */
3128 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3129 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3130
Alexander Grafcf74a782008-11-25 20:17:08 +01003131 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02003132 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01003133
Alexander Graf219b65d2009-06-15 15:21:25 +02003134 kvm_clear_exception_queue(&svm->vcpu);
3135 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01003136
Joerg Roedel4b161842010-09-10 17:31:03 +02003137 svm->nested.nested_cr3 = 0;
3138
Alexander Grafcf74a782008-11-25 20:17:08 +01003139 /* Restore selected save entries */
3140 svm->vmcb->save.es = hsave->save.es;
3141 svm->vmcb->save.cs = hsave->save.cs;
3142 svm->vmcb->save.ss = hsave->save.ss;
3143 svm->vmcb->save.ds = hsave->save.ds;
3144 svm->vmcb->save.gdtr = hsave->save.gdtr;
3145 svm->vmcb->save.idtr = hsave->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03003146 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
Alexander Grafcf74a782008-11-25 20:17:08 +01003147 svm_set_efer(&svm->vcpu, hsave->save.efer);
3148 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3149 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3150 if (npt_enabled) {
3151 svm->vmcb->save.cr3 = hsave->save.cr3;
3152 svm->vcpu.arch.cr3 = hsave->save.cr3;
3153 } else {
Avi Kivity23902182010-06-10 17:02:16 +03003154 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01003155 }
3156 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
3157 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
3158 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
3159 svm->vmcb->save.dr7 = 0;
3160 svm->vmcb->save.cpl = 0;
3161 svm->vmcb->control.exit_int_info = 0;
3162
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003163 mark_all_dirty(svm->vmcb);
3164
Joerg Roedel7597f122010-02-19 16:23:00 +01003165 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01003166
Joerg Roedel4b161842010-09-10 17:31:03 +02003167 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01003168 kvm_mmu_reset_context(&svm->vcpu);
3169 kvm_mmu_load(&svm->vcpu);
3170
3171 return 0;
3172}
Alexander Graf3d6368e2008-11-25 20:17:07 +01003173
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003174static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003175{
Joerg Roedel323c3d82010-03-01 15:34:37 +01003176 /*
3177 * This function merges the msr permission bitmaps of kvm and the
Guo Chaoc5ec2e52012-06-28 15:16:43 +08003178 * nested vmcb. It is optimized in that it only merges the parts where
Joerg Roedel323c3d82010-03-01 15:34:37 +01003179 * the kvm msr permission bitmap may contain zero bits
3180 */
Alexander Graf3d6368e2008-11-25 20:17:07 +01003181 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003182
Joerg Roedel323c3d82010-03-01 15:34:37 +01003183 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3184 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003185
Joerg Roedel323c3d82010-03-01 15:34:37 +01003186 for (i = 0; i < MSRPM_OFFSETS; i++) {
3187 u32 value, p;
3188 u64 offset;
3189
3190 if (msrpm_offsets[i] == 0xffffffff)
3191 break;
3192
Joerg Roedel0d6b3532010-03-01 15:34:38 +01003193 p = msrpm_offsets[i];
3194 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01003195
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02003196 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
Joerg Roedel323c3d82010-03-01 15:34:37 +01003197 return false;
3198
3199 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3200 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003201
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05003202 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
Alexander Graf3d6368e2008-11-25 20:17:07 +01003203
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003204 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003205}
3206
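/*
 * Illustrative note, not driver code: the merge above only walks the
 * MSRPM_OFFSETS words that KVM itself leaves partially zero; every other
 * word already intercepts unconditionally, so ORing in L1's bits can
 * only add intercepts, never grant an access KVM would deny.
 */
static inline u32 demo_merge_msrpm_word(u32 kvm_word, u32 l1_word)
{
	return kvm_word | l1_word;	/* intercept if either level asks */
}
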
Joerg Roedel52c65a302010-08-02 16:46:44 +02003207static bool nested_vmcb_checks(struct vmcb *vmcb)
3208{
3209 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3210 return false;
3211
Joerg Roedeldbe77582010-08-02 16:46:45 +02003212 if (vmcb->control.asid == 0)
3213 return false;
3214
Tom Lendackycea3a192017-12-04 10:57:24 -06003215 if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3216 !npt_enabled)
Joerg Roedel4b161842010-09-10 17:31:03 +02003217 return false;
3218
Joerg Roedel52c65a302010-08-02 16:46:44 +02003219 return true;
3220}
3221
Ladi Prosekc2634062017-10-11 16:54:44 +02003222static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3223 struct vmcb *nested_vmcb, struct page *page)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003224{
Avi Kivityf6e78472010-08-02 15:30:20 +03003225 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003226 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3227 else
3228 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3229
Tom Lendackycea3a192017-12-04 10:57:24 -06003230 if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
Joerg Roedel4b161842010-09-10 17:31:03 +02003231 kvm_mmu_unload(&svm->vcpu);
3232 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3233 nested_svm_init_mmu_context(&svm->vcpu);
3234 }
3235
Alexander Graf3d6368e2008-11-25 20:17:07 +01003236 /* Load the nested guest state */
3237 svm->vmcb->save.es = nested_vmcb->save.es;
3238 svm->vmcb->save.cs = nested_vmcb->save.cs;
3239 svm->vmcb->save.ss = nested_vmcb->save.ss;
3240 svm->vmcb->save.ds = nested_vmcb->save.ds;
3241 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3242 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03003243 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003244 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3245 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3246 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3247 if (npt_enabled) {
3248 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3249 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01003250 } else
Avi Kivity23902182010-06-10 17:02:16 +03003251 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01003252
3253 /* Guest paging mode is active - reset mmu */
3254 kvm_mmu_reset_context(&svm->vcpu);
3255
Joerg Roedeldefbba52009-08-07 11:49:30 +02003256 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003257 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
3258 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
3259 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01003260
Alexander Graf3d6368e2008-11-25 20:17:07 +01003261 /* In case we don't even reach vcpu_run, the fields are not updated */
3262 svm->vmcb->save.rax = nested_vmcb->save.rax;
3263 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3264 svm->vmcb->save.rip = nested_vmcb->save.rip;
3265 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3266 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3267 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3268
Joerg Roedelf7138532010-03-01 15:34:40 +01003269 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01003270 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003271
Joerg Roedelaad42c62009-08-07 11:49:34 +02003272 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003273 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01003274 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02003275 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3276 svm->nested.intercept = nested_vmcb->control.intercept;
3277
Joerg Roedelf40f6a42010-12-03 15:25:15 +01003278 svm_flush_tlb(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003279 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003280 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3281 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3282 else
3283 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3284
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003285 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3286 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003287 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3288 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003289 }
3290
Joerg Roedel0d945bd2010-05-05 16:04:45 +02003291 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003292 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02003293
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003294 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003295 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3296 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3297 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003298 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3299 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3300
Joerg Roedel7597f122010-02-19 16:23:00 +01003301 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003302
Joerg Roedel20307532010-11-29 17:51:48 +01003303 /* Enter Guest-Mode */
3304 enter_guest_mode(&svm->vcpu);
3305
Joerg Roedel384c6362010-11-30 18:03:56 +01003306 /*
3307 * Merge guest and host intercepts - must be called with vcpu in
3308 * guest-mode to take affect here
3309 */
3310 recalc_intercepts(svm);
3311
Joerg Roedel06fc77722010-02-19 16:23:07 +01003312 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003313
Joerg Roedel2af91942009-08-07 11:49:28 +02003314 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003315
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003316 mark_all_dirty(svm->vmcb);
Ladi Prosekc2634062017-10-11 16:54:44 +02003317}
3318
3319static bool nested_svm_vmrun(struct vcpu_svm *svm)
3320{
3321 struct vmcb *nested_vmcb;
3322 struct vmcb *hsave = svm->nested.hsave;
3323 struct vmcb *vmcb = svm->vmcb;
3324 struct page *page;
3325 u64 vmcb_gpa;
3326
3327 vmcb_gpa = svm->vmcb->save.rax;
3328
3329 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3330 if (!nested_vmcb)
3331 return false;
3332
3333 if (!nested_vmcb_checks(nested_vmcb)) {
3334 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
3335 nested_vmcb->control.exit_code_hi = 0;
3336 nested_vmcb->control.exit_info_1 = 0;
3337 nested_vmcb->control.exit_info_2 = 0;
3338
3339 nested_svm_unmap(page);
3340
3341 return false;
3342 }
3343
3344 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3345 nested_vmcb->save.rip,
3346 nested_vmcb->control.int_ctl,
3347 nested_vmcb->control.event_inj,
3348 nested_vmcb->control.nested_ctl);
3349
3350 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3351 nested_vmcb->control.intercept_cr >> 16,
3352 nested_vmcb->control.intercept_exceptions,
3353 nested_vmcb->control.intercept);
3354
3355 /* Clear internal status */
3356 kvm_clear_exception_queue(&svm->vcpu);
3357 kvm_clear_interrupt_queue(&svm->vcpu);
3358
3359 /*
3360 * Save the old vmcb, so we don't need to pick what we save, but can
3361 * restore everything when a VMEXIT occurs
3362 */
3363 hsave->save.es = vmcb->save.es;
3364 hsave->save.cs = vmcb->save.cs;
3365 hsave->save.ss = vmcb->save.ss;
3366 hsave->save.ds = vmcb->save.ds;
3367 hsave->save.gdtr = vmcb->save.gdtr;
3368 hsave->save.idtr = vmcb->save.idtr;
3369 hsave->save.efer = svm->vcpu.arch.efer;
3370 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
3371 hsave->save.cr4 = svm->vcpu.arch.cr4;
3372 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3373 hsave->save.rip = kvm_rip_read(&svm->vcpu);
3374 hsave->save.rsp = vmcb->save.rsp;
3375 hsave->save.rax = vmcb->save.rax;
3376 if (npt_enabled)
3377 hsave->save.cr3 = vmcb->save.cr3;
3378 else
3379 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
3380
3381 copy_vmcb_control_area(hsave, vmcb);
3382
3383 enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003384
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003385 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003386}
3387
Joerg Roedel9966bf62009-08-07 11:49:40 +02003388static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Alexander Graf55426752008-11-25 20:17:06 +01003389{
3390 to_vmcb->save.fs = from_vmcb->save.fs;
3391 to_vmcb->save.gs = from_vmcb->save.gs;
3392 to_vmcb->save.tr = from_vmcb->save.tr;
3393 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3394 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3395 to_vmcb->save.star = from_vmcb->save.star;
3396 to_vmcb->save.lstar = from_vmcb->save.lstar;
3397 to_vmcb->save.cstar = from_vmcb->save.cstar;
3398 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3399 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3400 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3401 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
Alexander Graf55426752008-11-25 20:17:06 +01003402}
3403
Avi Kivity851ba692009-08-24 11:10:17 +03003404static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003405{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003406 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003407 struct page *page;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003408 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003409
Alexander Graf55426752008-11-25 20:17:06 +01003410 if (nested_svm_check_permissions(svm))
3411 return 1;
3412
Joerg Roedel7597f122010-02-19 16:23:00 +01003413 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003414 if (!nested_vmcb)
3415 return 1;
3416
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003417 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003418 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003419
Joerg Roedel9966bf62009-08-07 11:49:40 +02003420 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003421 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003422
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003423 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01003424}
3425
Avi Kivity851ba692009-08-24 11:10:17 +03003426static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003427{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003428 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003429 struct page *page;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003430 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003431
Alexander Graf55426752008-11-25 20:17:06 +01003432 if (nested_svm_check_permissions(svm))
3433 return 1;
3434
Joerg Roedel7597f122010-02-19 16:23:00 +01003435 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003436 if (!nested_vmcb)
3437 return 1;
3438
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003439 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003440 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003441
Joerg Roedel9966bf62009-08-07 11:49:40 +02003442 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003443 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003444
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003445 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01003446}
3447
Avi Kivity851ba692009-08-24 11:10:17 +03003448static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003449{
Alexander Graf3d6368e2008-11-25 20:17:07 +01003450 if (nested_svm_check_permissions(svm))
3451 return 1;
3452
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02003453 /* Save rip after vmrun instruction */
3454 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003455
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003456 if (!nested_svm_vmrun(svm))
Alexander Graf3d6368e2008-11-25 20:17:07 +01003457 return 1;
3458
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003459 if (!nested_svm_vmrun_msrpm(svm))
Joerg Roedel1f8da472009-08-07 11:49:43 +02003460 goto failed;
3461
3462 return 1;
3463
3464failed:
3465
3466 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
3467 svm->vmcb->control.exit_code_hi = 0;
3468 svm->vmcb->control.exit_info_1 = 0;
3469 svm->vmcb->control.exit_info_2 = 0;
3470
3471 nested_svm_vmexit(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003472
3473 return 1;
3474}
3475
Avi Kivity851ba692009-08-24 11:10:17 +03003476static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003477{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003478 int ret;
3479
Alexander Graf1371d902008-11-25 20:17:04 +01003480 if (nested_svm_check_permissions(svm))
3481 return 1;
3482
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003483 /*
3484 * If VGIF is enabled, the STGI intercept is only added to
Ladi Prosekcc3d9672017-10-17 16:02:39 +02003485 * detect the opening of the SMI/NMI window; remove it now.
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003486 */
3487 if (vgif_enabled(svm))
3488 clr_intercept(svm, INTERCEPT_STGI);
3489
Alexander Graf1371d902008-11-25 20:17:04 +01003490 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003491 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03003492 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003493
Joerg Roedel2af91942009-08-07 11:49:28 +02003494 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003495
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003496 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01003497}
3498
Avi Kivity851ba692009-08-24 11:10:17 +03003499static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003500{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003501 int ret;
3502
Alexander Graf1371d902008-11-25 20:17:04 +01003503 if (nested_svm_check_permissions(svm))
3504 return 1;
3505
3506 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003507 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003508
Joerg Roedel2af91942009-08-07 11:49:28 +02003509 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003510
3511 /* After a CLGI no interrupts should be delivered */
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05003512 if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3513 svm_clear_vintr(svm);
3514 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3515 mark_dirty(svm->vmcb, VMCB_INTR);
3516 }
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003517
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003518 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01003519}
3520
Avi Kivity851ba692009-08-24 11:10:17 +03003521static int invlpga_interception(struct vcpu_svm *svm)
Alexander Grafff092382009-06-15 15:21:24 +02003522{
3523 struct kvm_vcpu *vcpu = &svm->vcpu;
Alexander Grafff092382009-06-15 15:21:24 +02003524
David Kaplan668f1982015-02-20 16:02:10 -06003525 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3526 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedelec1ff792009-10-09 16:08:31 +02003527
Alexander Grafff092382009-06-15 15:21:24 +02003528 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
David Kaplan668f1982015-02-20 16:02:10 -06003529 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Alexander Grafff092382009-06-15 15:21:24 +02003530
3531 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003532 return kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Grafff092382009-06-15 15:21:24 +02003533}
3534
Joerg Roedel532a46b2009-10-09 16:08:32 +02003535static int skinit_interception(struct vcpu_svm *svm)
3536{
David Kaplan668f1982015-02-20 16:02:10 -06003537 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedel532a46b2009-10-09 16:08:32 +02003538
3539 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3540 return 1;
3541}
3542
David Kaplandab429a2015-03-02 13:43:37 -06003543static int wbinvd_interception(struct vcpu_svm *svm)
3544{
Kyle Huey6affcbe2016-11-29 12:40:40 -08003545 return kvm_emulate_wbinvd(&svm->vcpu);
David Kaplandab429a2015-03-02 13:43:37 -06003546}
3547
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003548static int xsetbv_interception(struct vcpu_svm *svm)
3549{
3550 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3551 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3552
3553 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3554 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003555 return kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003556 }
3557
3558 return 1;
3559}
3560
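/*
 * Illustrative sketch, not driver code: XSETBV takes the new XCR value
 * in EDX:EAX, which kvm_read_edx_eax() reassembles like this:
 */
static inline u64 demo_edx_eax(u32 edx, u32 eax)
{
	return ((u64)edx << 32) | eax;
}
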
Avi Kivity851ba692009-08-24 11:10:17 +03003561static int task_switch_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003562{
Izik Eidus37817f22008-03-24 23:14:53 +02003563 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003564 int reason;
3565 int int_type = svm->vmcb->control.exit_int_info &
3566 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03003567 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003568 uint32_t type =
3569 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3570 uint32_t idt_v =
3571 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02003572 bool has_error_code = false;
3573 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02003574
3575 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003576
Izik Eidus37817f22008-03-24 23:14:53 +02003577 if (svm->vmcb->control.exit_info_2 &
3578 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003579 reason = TASK_SWITCH_IRET;
3580 else if (svm->vmcb->control.exit_info_2 &
3581 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3582 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003583 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003584 reason = TASK_SWITCH_GATE;
3585 else
3586 reason = TASK_SWITCH_CALL;
3587
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003588 if (reason == TASK_SWITCH_GATE) {
3589 switch (type) {
3590 case SVM_EXITINTINFO_TYPE_NMI:
3591 svm->vcpu.arch.nmi_injected = false;
3592 break;
3593 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02003594 if (svm->vmcb->control.exit_info_2 &
3595 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3596 has_error_code = true;
3597 error_code =
3598 (u32)svm->vmcb->control.exit_info_2;
3599 }
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003600 kvm_clear_exception_queue(&svm->vcpu);
3601 break;
3602 case SVM_EXITINTINFO_TYPE_INTR:
3603 kvm_clear_interrupt_queue(&svm->vcpu);
3604 break;
3605 default:
3606 break;
3607 }
3608 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003609
Gleb Natapov8317c292009-04-12 13:37:02 +03003610 if (reason != TASK_SWITCH_GATE ||
3611 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3612 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Gleb Natapovf629cf82009-05-11 13:35:49 +03003613 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
3614 skip_emulated_instruction(&svm->vcpu);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003615
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01003616 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3617 int_vec = -1;
3618
3619 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
Gleb Natapovacb54512010-04-15 21:03:50 +03003620 has_error_code, error_code) == EMULATE_FAIL) {
3621 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3622 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3623 svm->vcpu.run->internal.ndata = 0;
3624 return 0;
3625 }
3626 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003627}
3628
Avi Kivity851ba692009-08-24 11:10:17 +03003629static int cpuid_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003630{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003631 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Kyle Huey6a908b62016-11-29 12:40:37 -08003632 return kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003633}
3634
Avi Kivity851ba692009-08-24 11:10:17 +03003635static int iret_interception(struct vcpu_svm *svm)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003636{
3637 ++svm->vcpu.stat.nmi_window_exits;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003638 clr_intercept(svm, INTERCEPT_IRET);
Gleb Natapov44c11432009-05-11 13:35:52 +03003639 svm->vcpu.arch.hflags |= HF_IRET_MASK;
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003640 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
Radim Krčmářf303b4c2014-01-17 20:52:42 +01003641 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003642 return 1;
3643}
3644
Avi Kivity851ba692009-08-24 11:10:17 +03003645static int invlpg_interception(struct vcpu_svm *svm)
Marcelo Tosattia7052892008-09-23 13:18:35 -03003646{
Andre Przywaradf4f31082010-12-21 11:12:06 +01003647 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3648 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3649
3650 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003651 return kvm_skip_emulated_instruction(&svm->vcpu);
Marcelo Tosattia7052892008-09-23 13:18:35 -03003652}
3653
Avi Kivity851ba692009-08-24 11:10:17 +03003654static int emulate_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003655{
Andre Przywara51d8b662010-12-21 11:12:02 +01003656 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003657}
3658
Avi Kivity332b56e2011-11-10 14:57:24 +02003659static int rdpmc_interception(struct vcpu_svm *svm)
3660{
3661 int err;
3662
3663 if (!static_cpu_has(X86_FEATURE_NRIPS))
3664 return emulate_on_interception(svm);
3665
3666 err = kvm_rdpmc(&svm->vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08003667 return kvm_complete_insn_gp(&svm->vcpu, err);
Avi Kivity332b56e2011-11-10 14:57:24 +02003668}
3669
Xiubo Li52eb5a62015-03-13 17:39:45 +08003670static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3671 unsigned long val)
Joerg Roedel628afd22011-04-04 12:39:36 +02003672{
3673 unsigned long cr0 = svm->vcpu.arch.cr0;
3674 bool ret = false;
3675 u64 intercept;
3676
3677 intercept = svm->nested.intercept;
3678
3679 if (!is_guest_mode(&svm->vcpu) ||
3680 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3681 return false;
3682
3683 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3684 val &= ~SVM_CR0_SELECTIVE_MASK;
3685
3686 if (cr0 ^ val) {
3687 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3688 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3689 }
3690
3691 return ret;
3692}
3693
Andre Przywara7ff76d52010-12-21 11:12:04 +01003694#define CR_VALID (1ULL << 63)
3695
3696static int cr_interception(struct vcpu_svm *svm)
3697{
3698 int reg, cr;
3699 unsigned long val;
3700 int err;
3701
3702 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3703 return emulate_on_interception(svm);
3704
3705 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3706 return emulate_on_interception(svm);
3707
3708 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
David Kaplan5e575182015-03-06 14:44:35 -06003709 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3710 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3711 else
3712 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
Andre Przywara7ff76d52010-12-21 11:12:04 +01003713
3714 err = 0;
3715 if (cr >= 16) { /* mov to cr */
3716 cr -= 16;
3717 val = kvm_register_read(&svm->vcpu, reg);
3718 switch (cr) {
3719 case 0:
Joerg Roedel628afd22011-04-04 12:39:36 +02003720 if (!check_selective_cr0_intercepted(svm, val))
3721 err = kvm_set_cr0(&svm->vcpu, val);
Joerg Roedel977b2d02011-04-18 11:42:52 +02003722 else
3723 return 1;
3724
Andre Przywara7ff76d52010-12-21 11:12:04 +01003725 break;
3726 case 3:
3727 err = kvm_set_cr3(&svm->vcpu, val);
3728 break;
3729 case 4:
3730 err = kvm_set_cr4(&svm->vcpu, val);
3731 break;
3732 case 8:
3733 err = kvm_set_cr8(&svm->vcpu, val);
3734 break;
3735 default:
3736 WARN(1, "unhandled write to CR%d", cr);
3737 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3738 return 1;
3739 }
3740 } else { /* mov from cr */
3741 switch (cr) {
3742 case 0:
3743 val = kvm_read_cr0(&svm->vcpu);
3744 break;
3745 case 2:
3746 val = svm->vcpu.arch.cr2;
3747 break;
3748 case 3:
Avi Kivity9f8fe502010-12-05 17:30:00 +02003749 val = kvm_read_cr3(&svm->vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003750 break;
3751 case 4:
3752 val = kvm_read_cr4(&svm->vcpu);
3753 break;
3754 case 8:
3755 val = kvm_get_cr8(&svm->vcpu);
3756 break;
3757 default:
3758 WARN(1, "unhandled read from CR%d", cr);
3759 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3760 return 1;
3761 }
3762 kvm_register_write(&svm->vcpu, reg, val);
3763 }
Kyle Huey6affcbe2016-11-29 12:40:40 -08003764 return kvm_complete_insn_gp(&svm->vcpu, err);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003765}
3766
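/*
 * Worked example (illustrative only): with decode assists, a
 * "mov %rax, %cr4" exit reports reg = RAX in exit_info_1 and
 * exit_code = SVM_EXIT_WRITE_CR4, so the code above computes
 * cr = SVM_EXIT_WRITE_CR4 - SVM_EXIT_READ_CR0 = 20, takes the
 * "mov to cr" branch (cr >= 16) and lands in kvm_set_cr4().
 * A hypothetical helper condensing that decode:
 */
static inline int demo_cr_number(u32 exit_code)
{
	int cr = exit_code - SVM_EXIT_READ_CR0;	/* 0-15 read, 16-31 write */

	return cr >= 16 ? cr - 16 : cr;		/* strip the write offset */
}
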
Andre Przywaracae37972010-12-21 11:12:05 +01003767static int dr_interception(struct vcpu_svm *svm)
3768{
3769 int reg, dr;
3770 unsigned long val;
Andre Przywaracae37972010-12-21 11:12:05 +01003771
Paolo Bonzinifacb0132014-02-21 10:32:27 +01003772 if (svm->vcpu.guest_debug == 0) {
3773 /*
3774 * No more DR vmexits; force a reload of the debug registers
3775 * and reenter on this instruction. The next vmexit will
3776 * retrieve the full state of the debug registers.
3777 */
3778 clr_dr_intercepts(svm);
3779 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
3780 return 1;
3781 }
3782
Andre Przywaracae37972010-12-21 11:12:05 +01003783 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
3784 return emulate_on_interception(svm);
3785
3786 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3787 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3788
3789 if (dr >= 16) { /* mov to DRn */
Nadav Amit16f8a6f2014-10-03 01:10:05 +03003790 if (!kvm_require_dr(&svm->vcpu, dr - 16))
3791 return 1;
Andre Przywaracae37972010-12-21 11:12:05 +01003792 val = kvm_register_read(&svm->vcpu, reg);
3793 kvm_set_dr(&svm->vcpu, dr - 16, val);
3794 } else {
Nadav Amit16f8a6f2014-10-03 01:10:05 +03003795 if (!kvm_require_dr(&svm->vcpu, dr))
3796 return 1;
3797 kvm_get_dr(&svm->vcpu, dr, &val);
3798 kvm_register_write(&svm->vcpu, reg, val);
Andre Przywaracae37972010-12-21 11:12:05 +01003799 }
3800
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003801 return kvm_skip_emulated_instruction(&svm->vcpu);
Andre Przywaracae37972010-12-21 11:12:05 +01003802}
3803
Avi Kivity851ba692009-08-24 11:10:17 +03003804static int cr8_write_interception(struct vcpu_svm *svm)
Joerg Roedel1d075432007-12-06 21:02:25 +01003805{
Avi Kivity851ba692009-08-24 11:10:17 +03003806 struct kvm_run *kvm_run = svm->vcpu.run;
Andre Przywaraeea1cff2010-12-21 11:12:00 +01003807 int r;
Avi Kivity851ba692009-08-24 11:10:17 +03003808
Gleb Natapov0a5fff192009-04-21 17:45:06 +03003809 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3810 /* instruction emulation calls kvm_set_cr8() */
Andre Przywara7ff76d52010-12-21 11:12:04 +01003811 r = cr_interception(svm);
Paolo Bonzini35754c92015-07-29 12:05:37 +02003812 if (lapic_in_kernel(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01003813 return r;
Gleb Natapov0a5fff192009-04-21 17:45:06 +03003814 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01003815 return r;
Joerg Roedel1d075432007-12-06 21:02:25 +01003816 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3817 return 0;
3818}
3819
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003820static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003821{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003822 struct vcpu_svm *svm = to_svm(vcpu);
3823
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003824 switch (msr_info->index) {
Jaswinder Singh Rajputaf24a4e2009-05-15 18:42:05 +05303825 case MSR_IA32_TSC: {
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003826 msr_info->data = svm->vmcb->control.tsc_offset +
Haozhong Zhang35181e82015-10-20 15:39:03 +08003827 kvm_scale_tsc(vcpu, rdtsc());
Joerg Roedelfbc0db72011-03-25 09:44:46 +01003828
Avi Kivity6aa8b732006-12-10 02:21:36 -08003829 break;
3830 }
Brian Gerst8c065852010-07-17 09:03:26 -04003831 case MSR_STAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003832 msr_info->data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003833 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08003834#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08003835 case MSR_LSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003836 msr_info->data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003837 break;
3838 case MSR_CSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003839 msr_info->data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003840 break;
3841 case MSR_KERNEL_GS_BASE:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003842 msr_info->data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003843 break;
3844 case MSR_SYSCALL_MASK:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003845 msr_info->data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003846 break;
3847#endif
3848 case MSR_IA32_SYSENTER_CS:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003849 msr_info->data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003850 break;
3851 case MSR_IA32_SYSENTER_EIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003852 msr_info->data = svm->sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003853 break;
3854 case MSR_IA32_SYSENTER_ESP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003855 msr_info->data = svm->sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003856 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01003857 case MSR_TSC_AUX:
3858 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3859 return 1;
3860 msr_info->data = svm->tsc_aux;
3861 break;
Joerg Roedele0231712010-02-24 18:59:10 +01003862 /*
3863 * Nobody will change the following 5 values in the VMCB so we can
3864 * safely return them on rdmsr. They will always be 0 until LBRV is
3865 * implemented.
3866 */
Joerg Roedela2938c82008-02-13 16:30:28 +01003867 case MSR_IA32_DEBUGCTLMSR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003868 msr_info->data = svm->vmcb->save.dbgctl;
Joerg Roedela2938c82008-02-13 16:30:28 +01003869 break;
3870 case MSR_IA32_LASTBRANCHFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003871 msr_info->data = svm->vmcb->save.br_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01003872 break;
3873 case MSR_IA32_LASTBRANCHTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003874 msr_info->data = svm->vmcb->save.br_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01003875 break;
3876 case MSR_IA32_LASTINTFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003877 msr_info->data = svm->vmcb->save.last_excp_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01003878 break;
3879 case MSR_IA32_LASTINTTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003880 msr_info->data = svm->vmcb->save.last_excp_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01003881 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003882 case MSR_VM_HSAVE_PA:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003883 msr_info->data = svm->nested.hsave_msr;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003884 break;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01003885 case MSR_VM_CR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003886 msr_info->data = svm->nested.vm_cr_msr;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01003887 break;
Alexander Grafc8a73f12009-01-05 16:02:47 +01003888 case MSR_IA32_UCODE_REV:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003889 msr_info->data = 0x01000065;
Alexander Grafc8a73f12009-01-05 16:02:47 +01003890 break;
Borislav Petkovae8b7872015-11-23 11:12:23 +01003891 case MSR_F15H_IC_CFG: {
3892
3893 int family, model;
3894
3895 family = guest_cpuid_family(vcpu);
3896 model = guest_cpuid_model(vcpu);
3897
3898 if (family < 0 || model < 0)
3899 return kvm_get_msr_common(vcpu, msr_info);
3900
3901 msr_info->data = 0;
3902
3903 if (family == 0x15 &&
3904 (model >= 0x2 && model < 0x20))
3905 msr_info->data = 0x1E;
3906 }
3907 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003908 default:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003909 return kvm_get_msr_common(vcpu, msr_info);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003910 }
3911 return 0;
3912}
3913
Avi Kivity851ba692009-08-24 11:10:17 +03003914static int rdmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003915{
David Kaplan668f1982015-02-20 16:02:10 -06003916 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003917 struct msr_data msr_info;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003918
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003919 msr_info.index = ecx;
3920 msr_info.host_initiated = false;
3921 if (svm_get_msr(&svm->vcpu, &msr_info)) {
Avi Kivity59200272010-01-25 19:47:02 +02003922 trace_kvm_msr_read_ex(ecx);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003923 kvm_inject_gp(&svm->vcpu, 0);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003924 return 1;
Avi Kivity59200272010-01-25 19:47:02 +02003925 } else {
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003926 trace_kvm_msr_read(ecx, msr_info.data);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003927
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003928 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3929 msr_info.data & 0xffffffff);
3930 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3931 msr_info.data >> 32);
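		/* RDMSR is a two-byte opcode (0x0F 0x32); skip past it. */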
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003932 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003933 return kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003934 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08003935}
3936
Joerg Roedel4a810182010-02-24 18:59:15 +01003937static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3938{
3939 struct vcpu_svm *svm = to_svm(vcpu);
3940 int svm_dis, chg_mask;
3941
3942 if (data & ~SVM_VM_CR_VALID_MASK)
3943 return 1;
3944
3945 chg_mask = SVM_VM_CR_VALID_MASK;
3946
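	/*
	 * Once SVMDIS has been set, the SVM_LOCK and SVMDIS bits become
	 * read-only and further writes to them are ignored.
	 */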
3947 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3948 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3949
3950 svm->nested.vm_cr_msr &= ~chg_mask;
3951 svm->nested.vm_cr_msr |= (data & chg_mask);
3952
3953 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3954
3955 /* check for svm_disable while efer.svme is set */
3956 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3957 return 1;
3958
3959 return 0;
3960}
3961
Will Auld8fe8ab42012-11-29 12:42:12 -08003962static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003963{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003964 struct vcpu_svm *svm = to_svm(vcpu);
3965
Will Auld8fe8ab42012-11-29 12:42:12 -08003966 u32 ecx = msr->index;
3967 u64 data = msr->data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003968 switch (ecx) {
Paolo Bonzini15038e12017-10-26 09:13:27 +02003969 case MSR_IA32_CR_PAT:
3970 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3971 return 1;
3972 vcpu->arch.pat = data;
3973 svm->vmcb->save.g_pat = data;
3974 mark_dirty(svm->vmcb, VMCB_NPT);
3975 break;
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10003976 case MSR_IA32_TSC:
Will Auld8fe8ab42012-11-29 12:42:12 -08003977 kvm_write_tsc(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003978 break;
Brian Gerst8c065852010-07-17 09:03:26 -04003979 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003980 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003981 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08003982#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08003983 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003984 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003985 break;
3986 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003987 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003988 break;
3989 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003990 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003991 break;
3992 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003993 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003994 break;
3995#endif
3996 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003997 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003998 break;
3999 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02004000 svm->sysenter_eip = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004001 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004002 break;
4003 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02004004 svm->sysenter_esp = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004005 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004006 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01004007 case MSR_TSC_AUX:
4008 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4009 return 1;
4010
4011 /*
4012 * This is rare, so we update the MSR here instead of using
4013 * direct_access_msrs. Doing that would require a rdmsr in
4014 * svm_vcpu_put.
4015 */
4016 svm->tsc_aux = data;
4017 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
4018 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01004019 case MSR_IA32_DEBUGCTLMSR:
Avi Kivity2a6b20b2010-11-09 16:15:42 +02004020 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
Christoffer Dalla737f252012-06-03 21:17:48 +03004021 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
4022 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01004023 break;
4024 }
4025 if (data & DEBUGCTL_RESERVED_BITS)
4026 return 1;
4027
4028 svm->vmcb->save.dbgctl = data;
Joerg Roedelb53ba3f2010-12-03 11:45:59 +01004029 mark_dirty(svm->vmcb, VMCB_LBR);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01004030 if (data & (1ULL<<0))
4031 svm_enable_lbrv(svm);
4032 else
4033 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01004034 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01004035 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02004036 svm->nested.hsave_msr = data;
Alexander Grafb286d5d2008-11-25 20:17:05 +01004037 break;
Alexander Graf3c5d0a42009-06-15 15:21:23 +02004038 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01004039 return svm_set_vm_cr(vcpu, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02004040 case MSR_VM_IGNNE:
Christoffer Dalla737f252012-06-03 21:17:48 +03004041 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02004042 break;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004043 case MSR_IA32_APICBASE:
4044 if (kvm_vcpu_apicv_active(vcpu))
4045 avic_update_vapic_bar(to_svm(vcpu), data);
4046	/* Fall through */
Avi Kivity6aa8b732006-12-10 02:21:36 -08004047 default:
Will Auld8fe8ab42012-11-29 12:42:12 -08004048 return kvm_set_msr_common(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004049 }
4050 return 0;
4051}
4052
Avi Kivity851ba692009-08-24 11:10:17 +03004053static int wrmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004054{
Will Auld8fe8ab42012-11-29 12:42:12 -08004055 struct msr_data msr;
David Kaplan668f1982015-02-20 16:02:10 -06004056 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
4057 u64 data = kvm_read_edx_eax(&svm->vcpu);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004058
Will Auld8fe8ab42012-11-29 12:42:12 -08004059 msr.data = data;
4060 msr.index = ecx;
4061 msr.host_initiated = false;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004062
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004063 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Nadav Amit854e8bb2014-09-16 03:24:05 +03004064 if (kvm_set_msr(&svm->vcpu, &msr)) {
Avi Kivity59200272010-01-25 19:47:02 +02004065 trace_kvm_msr_write_ex(ecx, data);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02004066 kvm_inject_gp(&svm->vcpu, 0);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004067 return 1;
Avi Kivity59200272010-01-25 19:47:02 +02004068 } else {
4069 trace_kvm_msr_write(ecx, data);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004070 return kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity59200272010-01-25 19:47:02 +02004071 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08004072}
4073
Avi Kivity851ba692009-08-24 11:10:17 +03004074static int msr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004075{
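	/* For the MSR intercept, exit_info_1 is 0 for RDMSR and 1 for WRMSR. */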
Rusty Russelle756fc62007-07-30 20:07:08 +10004076 if (svm->vmcb->control.exit_info_1)
Avi Kivity851ba692009-08-24 11:10:17 +03004077 return wrmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004078 else
Avi Kivity851ba692009-08-24 11:10:17 +03004079 return rdmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004080}
4081
Avi Kivity851ba692009-08-24 11:10:17 +03004082static int interrupt_window_interception(struct vcpu_svm *svm)
Dor Laorc1150d82007-01-05 16:36:24 -08004083{
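	/*
	 * The guest can now accept interrupts: stop requesting a virtual
	 * interrupt window and let pending events be re-evaluated.
	 */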
Avi Kivity3842d132010-07-27 12:30:24 +03004084 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graff0b85052008-11-25 20:17:01 +01004085 svm_clear_vintr(svm);
Eddie Dong85f455f2007-07-06 12:20:49 +03004086 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Joerg Roedeldecdbf62010-12-03 11:45:52 +01004087 mark_dirty(svm->vmcb, VMCB_INTR);
Jason Wang675acb72012-03-08 18:07:56 +08004088 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08004089 return 1;
4090}
4091
Mark Langsdorf565d0992009-10-06 14:25:02 -05004092static int pause_interception(struct vcpu_svm *svm)
4093{
Longpeng(Mike)de63ad42017-08-08 12:05:33 +08004094 struct kvm_vcpu *vcpu = &svm->vcpu;
4095 bool in_kernel = (svm_get_cpl(vcpu) == 0);
4096
4097 kvm_vcpu_on_spin(vcpu, in_kernel);
Mark Langsdorf565d0992009-10-06 14:25:02 -05004098 return 1;
4099}
4100
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004101static int nop_interception(struct vcpu_svm *svm)
4102{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004103 return kvm_skip_emulated_instruction(&(svm->vcpu));
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004104}
4105
4106static int monitor_interception(struct vcpu_svm *svm)
4107{
4108 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
4109 return nop_interception(svm);
4110}
4111
4112static int mwait_interception(struct vcpu_svm *svm)
4113{
4114 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
4115 return nop_interception(svm);
4116}
4117
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004118enum avic_ipi_failure_cause {
4119 AVIC_IPI_FAILURE_INVALID_INT_TYPE,
4120 AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
4121 AVIC_IPI_FAILURE_INVALID_TARGET,
4122 AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
4123};
4124
4125static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4126{
4127 u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
4128 u32 icrl = svm->vmcb->control.exit_info_1;
4129 u32 id = svm->vmcb->control.exit_info_2 >> 32;
Dan Carpenter5446a972016-05-23 13:20:10 +03004130 u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004131 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4132
4133 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
4134
4135 switch (id) {
4136 case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
4137 /*
4138 * AVIC hardware handles the generation of
4139 * IPIs when the specified Message Type is Fixed
4140 * (also known as fixed delivery mode) and
4141 * the Trigger Mode is edge-triggered. The hardware
4142 * also supports self and broadcast delivery modes
4143 * specified via the Destination Shorthand(DSH)
4144		 * specified via the Destination Shorthand (DSH)
4145		 * field of the ICRL. Logical and physical APIC ID
4146		 * formats are supported. All other IPI types cause
4147		 * a #VMEXIT, which needs to be emulated.
4148 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4149 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4150 break;
4151 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4152 int i;
4153 struct kvm_vcpu *vcpu;
4154 struct kvm *kvm = svm->vcpu.kvm;
4155 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4156
4157 /*
4158 * At this point, we expect that the AVIC HW has already
4159 * set the appropriate IRR bits on the valid target
4160 * vcpus. So, we just need to kick the appropriate vcpu.
4161 */
4162 kvm_for_each_vcpu(i, vcpu, kvm) {
4163 bool m = kvm_apic_match_dest(vcpu, apic,
4164 icrl & KVM_APIC_SHORT_MASK,
4165 GET_APIC_DEST_FIELD(icrh),
4166 icrl & KVM_APIC_DEST_MASK);
4167
4168 if (m && !avic_vcpu_is_running(vcpu))
4169 kvm_vcpu_wake_up(vcpu);
4170 }
4171 break;
4172 }
4173 case AVIC_IPI_FAILURE_INVALID_TARGET:
4174 break;
4175 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4176 WARN_ONCE(1, "Invalid backing page\n");
4177 break;
4178 default:
4179 pr_err("Unknown IPI interception\n");
4180 }
4181
4182 return 1;
4183}
4184
4185static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
4186{
4187 struct kvm_arch *vm_data = &vcpu->kvm->arch;
4188 int index;
4189 u32 *logical_apic_id_table;
4190 int dlid = GET_APIC_LOGICAL_ID(ldr);
4191
4192 if (!dlid)
4193 return NULL;
4194
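	/*
	 * In xAPIC flat mode the logical ID is a bitmap with one bit per
	 * APIC, giving entries 0-7. In cluster mode the high nibble selects
	 * the cluster and the low nibble is a bitmap of APICs within it,
	 * giving four table entries per cluster.
	 */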
4195 if (flat) { /* flat */
4196 index = ffs(dlid) - 1;
4197 if (index > 7)
4198 return NULL;
4199 } else { /* cluster */
4200 int cluster = (dlid & 0xf0) >> 4;
4201 int apic = ffs(dlid & 0x0f) - 1;
4202
4203 if ((apic < 0) || (apic > 7) ||
4204 (cluster >= 0xf))
4205 return NULL;
4206 index = (cluster << 2) + apic;
4207 }
4208
4209 logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
4210
4211 return &logical_apic_id_table[index];
4212}
4213
4214static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
4215 bool valid)
4216{
4217 bool flat;
4218 u32 *entry, new_entry;
4219
4220 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
4221 entry = avic_get_logical_id_entry(vcpu, ldr, flat);
4222 if (!entry)
4223 return -EINVAL;
4224
4225 new_entry = READ_ONCE(*entry);
4226 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
4227 new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
4228 if (valid)
4229 new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4230 else
4231 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4232 WRITE_ONCE(*entry, new_entry);
4233
4234 return 0;
4235}
4236
4237static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
4238{
4239 int ret;
4240 struct vcpu_svm *svm = to_svm(vcpu);
4241 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
4242
4243 if (!ldr)
4244 return 1;
4245
4246 ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
4247 if (ret && svm->ldr_reg) {
4248 avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
4249 svm->ldr_reg = 0;
4250 } else {
4251 svm->ldr_reg = ldr;
4252 }
4253 return ret;
4254}
4255
4256static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
4257{
4258 u64 *old, *new;
4259 struct vcpu_svm *svm = to_svm(vcpu);
4260 u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
4261 u32 id = (apic_id_reg >> 24) & 0xff;
4262
4263 if (vcpu->vcpu_id == id)
4264 return 0;
4265
4266 old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
4267 new = avic_get_physical_id_entry(vcpu, id);
4268 if (!new || !old)
4269 return 1;
4270
4271	/* We need to move the physical_id_entry to the new offset */
4272 *new = *old;
4273 *old = 0ULL;
4274 to_svm(vcpu)->avic_physical_id_cache = new;
4275
4276 /*
4277 * Also update the guest physical APIC ID in the logical
4278	 * APIC ID table entry if the LDR has already been set up.
4279 */
4280 if (svm->ldr_reg)
4281 avic_handle_ldr_update(vcpu);
4282
4283 return 0;
4284}
4285
4286static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
4287{
4288 struct vcpu_svm *svm = to_svm(vcpu);
4289 struct kvm_arch *vm_data = &vcpu->kvm->arch;
4290 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
4291 u32 mod = (dfr >> 28) & 0xf;
4292
4293 /*
4294	 * We assume that all local APICs are using the same destination
4295	 * format (DFR). If this changes, we need to flush the AVIC logical
4296	 * APIC ID table.
4297 */
4298 if (vm_data->ldr_mode == mod)
4299 return 0;
4300
4301 clear_page(page_address(vm_data->avic_logical_id_table_page));
4302 vm_data->ldr_mode = mod;
4303
4304 if (svm->ldr_reg)
4305 avic_handle_ldr_update(vcpu);
4306 return 0;
4307}
4308
4309static int avic_unaccel_trap_write(struct vcpu_svm *svm)
4310{
4311 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4312 u32 offset = svm->vmcb->control.exit_info_1 &
4313 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4314
4315 switch (offset) {
4316 case APIC_ID:
4317 if (avic_handle_apic_id_update(&svm->vcpu))
4318 return 0;
4319 break;
4320 case APIC_LDR:
4321 if (avic_handle_ldr_update(&svm->vcpu))
4322 return 0;
4323 break;
4324 case APIC_DFR:
4325 avic_handle_dfr_update(&svm->vcpu);
4326 break;
4327 default:
4328 break;
4329 }
4330
4331 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
4332
4333 return 1;
4334}
4335
4336static bool is_avic_unaccelerated_access_trap(u32 offset)
4337{
4338 bool ret = false;
4339
4340 switch (offset) {
4341 case APIC_ID:
4342 case APIC_EOI:
4343 case APIC_RRR:
4344 case APIC_LDR:
4345 case APIC_DFR:
4346 case APIC_SPIV:
4347 case APIC_ESR:
4348 case APIC_ICR:
4349 case APIC_LVTT:
4350 case APIC_LVTTHMR:
4351 case APIC_LVTPC:
4352 case APIC_LVT0:
4353 case APIC_LVT1:
4354 case APIC_LVTERR:
4355 case APIC_TMICT:
4356 case APIC_TDCR:
4357 ret = true;
4358 break;
4359 default:
4360 break;
4361 }
4362 return ret;
4363}
4364
4365static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4366{
4367 int ret = 0;
4368 u32 offset = svm->vmcb->control.exit_info_1 &
4369 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4370 u32 vector = svm->vmcb->control.exit_info_2 &
4371 AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4372 bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4373 AVIC_UNACCEL_ACCESS_WRITE_MASK;
4374 bool trap = is_avic_unaccelerated_access_trap(offset);
4375
4376 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4377 trap, write, vector);
4378 if (trap) {
4379 /* Handling Trap */
4380 WARN_ONCE(!write, "svm: Handling trap read.\n");
4381 ret = avic_unaccel_trap_write(svm);
4382 } else {
4383 /* Handling Fault */
4384 ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4385 }
4386
4387 return ret;
4388}
4389
Mathias Krause09941fb2012-08-30 01:30:20 +02004390static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
Andre Przywara7ff76d52010-12-21 11:12:04 +01004391 [SVM_EXIT_READ_CR0] = cr_interception,
4392 [SVM_EXIT_READ_CR3] = cr_interception,
4393 [SVM_EXIT_READ_CR4] = cr_interception,
4394 [SVM_EXIT_READ_CR8] = cr_interception,
David Kaplan5e575182015-03-06 14:44:35 -06004395 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
Joerg Roedel628afd22011-04-04 12:39:36 +02004396 [SVM_EXIT_WRITE_CR0] = cr_interception,
Andre Przywara7ff76d52010-12-21 11:12:04 +01004397 [SVM_EXIT_WRITE_CR3] = cr_interception,
4398 [SVM_EXIT_WRITE_CR4] = cr_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004399 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Andre Przywaracae37972010-12-21 11:12:05 +01004400 [SVM_EXIT_READ_DR0] = dr_interception,
4401 [SVM_EXIT_READ_DR1] = dr_interception,
4402 [SVM_EXIT_READ_DR2] = dr_interception,
4403 [SVM_EXIT_READ_DR3] = dr_interception,
4404 [SVM_EXIT_READ_DR4] = dr_interception,
4405 [SVM_EXIT_READ_DR5] = dr_interception,
4406 [SVM_EXIT_READ_DR6] = dr_interception,
4407 [SVM_EXIT_READ_DR7] = dr_interception,
4408 [SVM_EXIT_WRITE_DR0] = dr_interception,
4409 [SVM_EXIT_WRITE_DR1] = dr_interception,
4410 [SVM_EXIT_WRITE_DR2] = dr_interception,
4411 [SVM_EXIT_WRITE_DR3] = dr_interception,
4412 [SVM_EXIT_WRITE_DR4] = dr_interception,
4413 [SVM_EXIT_WRITE_DR5] = dr_interception,
4414 [SVM_EXIT_WRITE_DR6] = dr_interception,
4415 [SVM_EXIT_WRITE_DR7] = dr_interception,
Jan Kiszkad0bfb942008-12-15 13:52:10 +01004416 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
4417 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05004418 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004419 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004420 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Eric Northup54a20552015-11-03 18:03:53 +01004421 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004422 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02004423 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004424 [SVM_EXIT_SMI] = nop_on_interception,
4425 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08004426 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity332b56e2011-11-10 14:57:24 +02004427 [SVM_EXIT_RDPMC] = rdpmc_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004428 [SVM_EXIT_CPUID] = cpuid_interception,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004429 [SVM_EXIT_IRET] = iret_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02004430 [SVM_EXIT_INVD] = emulate_on_interception,
Mark Langsdorf565d0992009-10-06 14:25:02 -05004431 [SVM_EXIT_PAUSE] = pause_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004432 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03004433 [SVM_EXIT_INVLPG] = invlpg_interception,
Alexander Grafff092382009-06-15 15:21:24 +02004434 [SVM_EXIT_INVLPGA] = invlpga_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004435 [SVM_EXIT_IOIO] = io_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004436 [SVM_EXIT_MSR] = msr_interception,
4437 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08004438 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Alexander Graf3d6368e2008-11-25 20:17:07 +01004439 [SVM_EXIT_VMRUN] = vmrun_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02004440 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Alexander Graf55426752008-11-25 20:17:06 +01004441 [SVM_EXIT_VMLOAD] = vmload_interception,
4442 [SVM_EXIT_VMSAVE] = vmsave_interception,
Alexander Graf1371d902008-11-25 20:17:04 +01004443 [SVM_EXIT_STGI] = stgi_interception,
4444 [SVM_EXIT_CLGI] = clgi_interception,
Joerg Roedel532a46b2009-10-09 16:08:32 +02004445 [SVM_EXIT_SKINIT] = skinit_interception,
David Kaplandab429a2015-03-02 13:43:37 -06004446 [SVM_EXIT_WBINVD] = wbinvd_interception,
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004447 [SVM_EXIT_MONITOR] = monitor_interception,
4448 [SVM_EXIT_MWAIT] = mwait_interception,
Joerg Roedel81dd35d2010-12-07 17:15:06 +01004449 [SVM_EXIT_XSETBV] = xsetbv_interception,
Paolo Bonzinid0006532017-08-11 18:36:43 +02004450 [SVM_EXIT_NPF] = npf_interception,
Paolo Bonzini64d60672015-05-07 11:36:11 +02004451 [SVM_EXIT_RSM] = emulate_on_interception,
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004452 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
4453 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004454};
4455
Joe Perchesae8cc052011-04-24 22:00:50 -07004456static void dump_vmcb(struct kvm_vcpu *vcpu)
Joerg Roedel3f10c842010-05-05 16:04:42 +02004457{
4458 struct vcpu_svm *svm = to_svm(vcpu);
4459 struct vmcb_control_area *control = &svm->vmcb->control;
4460 struct vmcb_save_area *save = &svm->vmcb->save;
4461
4462 pr_err("VMCB Control Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07004463 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4464 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4465 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4466 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4467 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4468 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4469 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4470 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4471 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4472 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4473 pr_err("%-20s%d\n", "asid:", control->asid);
4474 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4475 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4476 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4477 pr_err("%-20s%08x\n", "int_state:", control->int_state);
4478 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4479 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4480 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4481 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4482 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4483 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4484 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004485 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
Joe Perchesae8cc052011-04-24 22:00:50 -07004486 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4487 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05004488 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
Joe Perchesae8cc052011-04-24 22:00:50 -07004489 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004490 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4491 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4492 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004493 pr_err("VMCB State Save Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07004494 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4495 "es:",
4496 save->es.selector, save->es.attrib,
4497 save->es.limit, save->es.base);
4498 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4499 "cs:",
4500 save->cs.selector, save->cs.attrib,
4501 save->cs.limit, save->cs.base);
4502 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4503 "ss:",
4504 save->ss.selector, save->ss.attrib,
4505 save->ss.limit, save->ss.base);
4506 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4507 "ds:",
4508 save->ds.selector, save->ds.attrib,
4509 save->ds.limit, save->ds.base);
4510 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4511 "fs:",
4512 save->fs.selector, save->fs.attrib,
4513 save->fs.limit, save->fs.base);
4514 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4515 "gs:",
4516 save->gs.selector, save->gs.attrib,
4517 save->gs.limit, save->gs.base);
4518 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4519 "gdtr:",
4520 save->gdtr.selector, save->gdtr.attrib,
4521 save->gdtr.limit, save->gdtr.base);
4522 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4523 "ldtr:",
4524 save->ldtr.selector, save->ldtr.attrib,
4525 save->ldtr.limit, save->ldtr.base);
4526 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4527 "idtr:",
4528 save->idtr.selector, save->idtr.attrib,
4529 save->idtr.limit, save->idtr.base);
4530 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4531 "tr:",
4532 save->tr.selector, save->tr.attrib,
4533 save->tr.limit, save->tr.base);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004534 pr_err("cpl: %d efer: %016llx\n",
4535 save->cpl, save->efer);
Joe Perchesae8cc052011-04-24 22:00:50 -07004536 pr_err("%-15s %016llx %-13s %016llx\n",
4537 "cr0:", save->cr0, "cr2:", save->cr2);
4538 pr_err("%-15s %016llx %-13s %016llx\n",
4539 "cr3:", save->cr3, "cr4:", save->cr4);
4540 pr_err("%-15s %016llx %-13s %016llx\n",
4541 "dr6:", save->dr6, "dr7:", save->dr7);
4542 pr_err("%-15s %016llx %-13s %016llx\n",
4543 "rip:", save->rip, "rflags:", save->rflags);
4544 pr_err("%-15s %016llx %-13s %016llx\n",
4545 "rsp:", save->rsp, "rax:", save->rax);
4546 pr_err("%-15s %016llx %-13s %016llx\n",
4547 "star:", save->star, "lstar:", save->lstar);
4548 pr_err("%-15s %016llx %-13s %016llx\n",
4549 "cstar:", save->cstar, "sfmask:", save->sfmask);
4550 pr_err("%-15s %016llx %-13s %016llx\n",
4551 "kernel_gs_base:", save->kernel_gs_base,
4552 "sysenter_cs:", save->sysenter_cs);
4553 pr_err("%-15s %016llx %-13s %016llx\n",
4554 "sysenter_esp:", save->sysenter_esp,
4555 "sysenter_eip:", save->sysenter_eip);
4556 pr_err("%-15s %016llx %-13s %016llx\n",
4557 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4558 pr_err("%-15s %016llx %-13s %016llx\n",
4559 "br_from:", save->br_from, "br_to:", save->br_to);
4560 pr_err("%-15s %016llx %-13s %016llx\n",
4561 "excp_from:", save->last_excp_from,
4562 "excp_to:", save->last_excp_to);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004563}
4564
Avi Kivity586f9602010-11-18 13:09:54 +02004565static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4566{
4567 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4568
4569 *info1 = control->exit_info_1;
4570 *info2 = control->exit_info_2;
4571}
4572
Avi Kivity851ba692009-08-24 11:10:17 +03004573static int handle_exit(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004574{
Avi Kivity04d2cc72007-09-10 18:10:54 +03004575 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03004576 struct kvm_run *kvm_run = vcpu->run;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004577 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004578
Paolo Bonzini8b89fe12015-12-10 18:37:32 +01004579 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4580
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004581 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
Joerg Roedel2be4fc72010-04-22 12:33:09 +02004582 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4583 if (npt_enabled)
4584 vcpu->arch.cr3 = svm->vmcb->save.cr3;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004585
Joerg Roedelcd3ff652009-10-09 16:08:26 +02004586 if (unlikely(svm->nested.exit_required)) {
4587 nested_svm_vmexit(svm);
4588 svm->nested.exit_required = false;
4589
4590 return 1;
4591 }
4592
Joerg Roedel20307532010-11-29 17:51:48 +01004593 if (is_guest_mode(vcpu)) {
Joerg Roedel410e4d52009-08-07 11:49:44 +02004594 int vmexit;
4595
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02004596 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4597 svm->vmcb->control.exit_info_1,
4598 svm->vmcb->control.exit_info_2,
4599 svm->vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01004600 svm->vmcb->control.exit_int_info_err,
4601 KVM_ISA_SVM);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02004602
Joerg Roedel410e4d52009-08-07 11:49:44 +02004603 vmexit = nested_svm_exit_special(svm);
4604
4605 if (vmexit == NESTED_EXIT_CONTINUE)
4606 vmexit = nested_svm_exit_handled(svm);
4607
4608 if (vmexit == NESTED_EXIT_DONE)
Alexander Grafcf74a782008-11-25 20:17:08 +01004609 return 1;
Alexander Grafcf74a782008-11-25 20:17:08 +01004610 }
4611
Joerg Roedela5c38322009-08-07 11:49:32 +02004612 svm_complete_interrupts(svm);
4613
Avi Kivity04d2cc72007-09-10 18:10:54 +03004614 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4615 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4616 kvm_run->fail_entry.hardware_entry_failure_reason
4617 = svm->vmcb->control.exit_code;
Joerg Roedel3f10c842010-05-05 16:04:42 +02004618 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
4619 dump_vmcb(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03004620 return 0;
4621 }
4622
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004623 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01004624 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
Joerg Roedel55c5e462010-09-10 17:31:04 +02004625 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4626 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
Borislav Petkov6614c7d2013-04-26 00:22:01 +02004627 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
Avi Kivity6aa8b732006-12-10 02:21:36 -08004628 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08004629 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004630 exit_code);
4631
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02004632 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08004633 || !svm_exit_handlers[exit_code]) {
Bandan Dasfaac2452015-03-16 17:18:25 -04004634 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
Michael S. Tsirkin2bc19dc2014-09-18 16:21:16 +03004635 kvm_queue_exception(vcpu, UD_VECTOR);
4636 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004637 }
4638
Avi Kivity851ba692009-08-24 11:10:17 +03004639 return svm_exit_handlers[exit_code](svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004640}
4641
4642static void reload_tss(struct kvm_vcpu *vcpu)
4643{
4644 int cpu = raw_smp_processor_id();
4645
Tejun Heo0fe1e002009-10-29 22:34:14 +09004646 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
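	/*
	 * LTR requires an available TSS, but the host TSS descriptor stays
	 * marked busy after VMEXIT; reset its type before reloading TR.
	 */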
4647 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08004648 load_TR_desc();
4649}
4650
Brijesh Singh70cd94e2017-12-04 10:57:34 -06004651static void pre_sev_run(struct vcpu_svm *svm, int cpu)
4652{
4653 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4654 int asid = sev_get_asid(svm->vcpu.kvm);
4655
4656 /* Assign the asid allocated with this SEV guest */
4657 svm->vmcb->control.asid = asid;
4658
4659 /*
4660 * Flush guest TLB:
4661 *
4662	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
4663	 * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
4664 */
4665 if (sd->sev_vmcbs[asid] == svm->vmcb &&
4666 svm->last_cpu == cpu)
4667 return;
4668
4669 svm->last_cpu = cpu;
4670 sd->sev_vmcbs[asid] = svm->vmcb;
4671 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
4672 mark_dirty(svm->vmcb, VMCB_ASID);
4673}
4674
Rusty Russelle756fc62007-07-30 20:07:08 +10004675static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004676{
4677 int cpu = raw_smp_processor_id();
4678
Tejun Heo0fe1e002009-10-29 22:34:14 +09004679 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004680
Brijesh Singh70cd94e2017-12-04 10:57:34 -06004681 if (sev_guest(svm->vcpu.kvm))
4682 return pre_sev_run(svm, cpu);
4683
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03004684 /* FIXME: handle wraparound of asid_generation */
Tejun Heo0fe1e002009-10-29 22:34:14 +09004685 if (svm->asid_generation != sd->asid_generation)
4686 new_asid(svm, sd);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004687}
4688
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004689static void svm_inject_nmi(struct kvm_vcpu *vcpu)
4690{
4691 struct vcpu_svm *svm = to_svm(vcpu);
4692
4693 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
4694 vcpu->arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01004695 set_intercept(svm, INTERCEPT_IRET);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004696 ++vcpu->stat.nmi_injections;
4697}
Avi Kivity6aa8b732006-12-10 02:21:36 -08004698
Eddie Dong85f455f2007-07-06 12:20:49 +03004699static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004700{
4701 struct vmcb_control_area *control;
4702
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004703 /* The following fields are ignored when AVIC is enabled */
Rusty Russelle756fc62007-07-30 20:07:08 +10004704 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03004705 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004706 control->int_ctl &= ~V_INTR_PRIO_MASK;
4707 control->int_ctl |= V_IRQ_MASK |
4708 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
Joerg Roedeldecdbf62010-12-03 11:45:52 +01004709 mark_dirty(svm->vmcb, VMCB_INTR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004710}
4711
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004712static void svm_set_irq(struct kvm_vcpu *vcpu)
Eddie Dong2a8067f2007-08-06 16:29:07 +03004713{
4714 struct vcpu_svm *svm = to_svm(vcpu);
4715
Joerg Roedel2af91942009-08-07 11:49:28 +02004716 BUG_ON(!(gif_set(svm)));
Alexander Grafcf74a782008-11-25 20:17:08 +01004717
Gleb Natapov9fb2d2b2010-05-23 14:28:26 +03004718 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
4719 ++vcpu->stat.irq_injections;
4720
Alexander Graf219b65d2009-06-15 15:21:25 +02004721 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
4722 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
Eddie Dong2a8067f2007-08-06 16:29:07 +03004723}
4724
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004725static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
4726{
4727 return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
4728}
4729
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004730static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
4731{
4732 struct vcpu_svm *svm = to_svm(vcpu);
4733
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004734 if (svm_nested_virtualize_tpr(vcpu) ||
4735 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01004736 return;
4737
Radim Krčmář596f3142014-03-11 19:11:18 +01004738 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
4739
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004740 if (irr == -1)
4741 return;
4742
4743 if (tpr >= irr)
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004744 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004745}
4746
Yang Zhang8d146952013-01-25 10:18:50 +08004747static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
4748{
4749 return;
4750}
4751
Suravee Suthikulpanitb2a05fe2017-09-12 10:42:41 -05004752static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
Yang Zhangc7c9c562013-01-25 10:18:51 +08004753{
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05004754 return avic && irqchip_split(vcpu->kvm);
Yang Zhangc7c9c562013-01-25 10:18:51 +08004755}
4756
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004757static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
4758{
4759}
4760
Paolo Bonzini67c9ddd2016-05-10 17:01:23 +02004761static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004762{
4763}
4764
4765/* Note: Currently only used by Hyper-V. */
Andrey Smetanind62caab2015-11-10 15:36:33 +03004766static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4767{
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004768 struct vcpu_svm *svm = to_svm(vcpu);
4769 struct vmcb *vmcb = svm->vmcb;
4770
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05004771 if (!kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004772 return;
4773
4774 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
4775 mark_dirty(vmcb, VMCB_INTR);
Yang Zhangc7c9c562013-01-25 10:18:51 +08004776}
4777
Andrey Smetanin63086302015-11-10 15:36:32 +03004778static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
Yang Zhangc7c9c562013-01-25 10:18:51 +08004779{
4780 return;
4781}
4782
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004783static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
4784{
4785 kvm_lapic_set_irr(vec, vcpu->arch.apic);
4786 smp_mb__after_atomic();
4787
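	/*
	 * If the target vCPU is running in guest mode, ring its AVIC
	 * doorbell so the hardware delivers the interrupt directly;
	 * otherwise wake the vCPU so it picks up the new IRR bit on entry.
	 */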
4788 if (avic_vcpu_is_running(vcpu))
4789 wrmsrl(SVM_AVIC_DOORBELL,
Suravee Suthikulpanit7d669f52016-06-15 17:23:45 -05004790 kvm_cpu_get_apicid(vcpu->cpu));
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004791 else
4792 kvm_vcpu_wake_up(vcpu);
4793}
4794
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004795static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4796{
4797 unsigned long flags;
4798 struct amd_svm_iommu_ir *cur;
4799
4800 spin_lock_irqsave(&svm->ir_list_lock, flags);
4801 list_for_each_entry(cur, &svm->ir_list, node) {
4802 if (cur->data != pi->ir_data)
4803 continue;
4804 list_del(&cur->node);
4805 kfree(cur);
4806 break;
4807 }
4808 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4809}
4810
4811static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4812{
4813 int ret = 0;
4814 unsigned long flags;
4815 struct amd_svm_iommu_ir *ir;
4816
4817 /**
4818	 * In some cases, the existing irte is updated and re-set,
4819	 * so we need to check here if it's already been added
4820	 * to the ir_list.
4821 */
4822 if (pi->ir_data && (pi->prev_ga_tag != 0)) {
4823 struct kvm *kvm = svm->vcpu.kvm;
4824 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
4825 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
4826 struct vcpu_svm *prev_svm;
4827
4828 if (!prev_vcpu) {
4829 ret = -EINVAL;
4830 goto out;
4831 }
4832
4833 prev_svm = to_svm(prev_vcpu);
4834 svm_ir_list_del(prev_svm, pi);
4835 }
4836
4837 /**
4838	 * Allocate a new amd_iommu_pi_data, which will be
4839	 * added to the per-vcpu ir_list.
4840 */
4841 ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
4842 if (!ir) {
4843 ret = -ENOMEM;
4844 goto out;
4845 }
4846 ir->data = pi->ir_data;
4847
4848 spin_lock_irqsave(&svm->ir_list_lock, flags);
4849 list_add(&ir->node, &svm->ir_list);
4850 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4851out:
4852 return ret;
4853}
4854
4855/**
4856 * Note:
4857 * The HW cannot support posting multicast/broadcast
4858 * interrupts to a vCPU. So, we still use legacy interrupt
4859 * remapping for these kinds of interrupts.
4860 *
4861 * For lowest-priority interrupts, we only support
4862 * those with a single CPU as the destination, e.g. the user
4863 * configures the interrupts via /proc/irq or uses
4864 * irqbalance to make the interrupts single-CPU.
4865 */
4866static int
4867get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
4868 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
4869{
4870 struct kvm_lapic_irq irq;
4871 struct kvm_vcpu *vcpu = NULL;
4872
4873 kvm_set_msi_irq(kvm, e, &irq);
4874
4875 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
4876 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
4877 __func__, irq.vector);
4878 return -1;
4879 }
4880
4881 pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
4882 irq.vector);
4883 *svm = to_svm(vcpu);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05004884 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004885 vcpu_info->vector = irq.vector;
4886
4887 return 0;
4888}
4889
4890/*
4891 * svm_update_pi_irte - set IRTE for Posted-Interrupts
4892 *
4893 * @kvm: kvm
4894 * @host_irq: host irq of the interrupt
4895 * @guest_irq: gsi of the interrupt
4896 * @set: set or unset PI
4897 * returns 0 on success, < 0 on failure
4898 */
4899static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
4900 uint32_t guest_irq, bool set)
4901{
4902 struct kvm_kernel_irq_routing_entry *e;
4903 struct kvm_irq_routing_table *irq_rt;
4904 int idx, ret = -EINVAL;
4905
4906 if (!kvm_arch_has_assigned_device(kvm) ||
4907 !irq_remapping_cap(IRQ_POSTING_CAP))
4908 return 0;
4909
4910 pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
4911 __func__, host_irq, guest_irq, set);
4912
4913 idx = srcu_read_lock(&kvm->irq_srcu);
4914 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
4915 WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
4916
4917 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
4918 struct vcpu_data vcpu_info;
4919 struct vcpu_svm *svm = NULL;
4920
4921 if (e->type != KVM_IRQ_ROUTING_MSI)
4922 continue;
4923
4924 /**
4925		 * Here, we set up legacy mode in the following cases:
4926		 * 1. When the interrupt cannot be targeted to a specific vcpu.
4927		 * 2. When unsetting the posted interrupt.
4928		 * 3. When APIC virtualization is disabled for the vcpu.
4929 */
4930 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
4931 kvm_vcpu_apicv_active(&svm->vcpu)) {
4932 struct amd_iommu_pi_data pi;
4933
4934 /* Try to enable guest_mode in IRTE */
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05004935 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
4936 AVIC_HPA_MASK);
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004937 pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
4938 svm->vcpu.vcpu_id);
4939 pi.is_guest_mode = true;
4940 pi.vcpu_data = &vcpu_info;
4941 ret = irq_set_vcpu_affinity(host_irq, &pi);
4942
4943 /**
4944			 * Here, we have successfully set up vcpu affinity in
4945			 * IOMMU guest mode. Now, we need to store the posted
4946			 * interrupt information in a per-vcpu ir_list so that
4947			 * we can reference it directly when we update vcpu
4948			 * scheduling information in the IOMMU irte.
4949 */
4950 if (!ret && pi.is_guest_mode)
4951 svm_ir_list_add(svm, &pi);
4952 } else {
4953 /* Use legacy mode in IRTE */
4954 struct amd_iommu_pi_data pi;
4955
4956 /**
4957 * Here, pi is used to:
4958 * - Tell IOMMU to use legacy mode for this interrupt.
4959 * - Retrieve ga_tag of prior interrupt remapping data.
4960 */
4961 pi.is_guest_mode = false;
4962 ret = irq_set_vcpu_affinity(host_irq, &pi);
4963
4964 /**
4965 * Check if the posted interrupt was previously
4966			 * set up in guest_mode by checking if the ga_tag
4967 * was cached. If so, we need to clean up the per-vcpu
4968 * ir_list.
4969 */
4970 if (!ret && pi.prev_ga_tag) {
4971 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
4972 struct kvm_vcpu *vcpu;
4973
4974 vcpu = kvm_get_vcpu_by_id(kvm, id);
4975 if (vcpu)
4976 svm_ir_list_del(to_svm(vcpu), &pi);
4977 }
4978 }
4979
4980 if (!ret && svm) {
4981 trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
4982 host_irq, e->gsi,
4983 vcpu_info.vector,
4984 vcpu_info.pi_desc_addr, set);
4985 }
4986
4987 if (ret < 0) {
4988 pr_err("%s: failed to update PI IRTE\n", __func__);
4989 goto out;
4990 }
4991 }
4992
4993 ret = 0;
4994out:
4995 srcu_read_unlock(&kvm->irq_srcu, idx);
4996 return ret;
4997}
4998
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004999static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02005000{
5001 struct vcpu_svm *svm = to_svm(vcpu);
5002 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel924584c2010-04-22 12:33:07 +02005003 int ret;
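	/*
	 * An NMI can be injected only when there is no interrupt shadow,
	 * NMIs are not masked by a previous NMI, GIF is set and the nested
	 * guest configuration allows it.
	 */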
5004 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
5005 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
5006 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
5007
5008 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02005009}
5010
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005011static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
5012{
5013 struct vcpu_svm *svm = to_svm(vcpu);
5014
5015 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
5016}
5017
5018static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5019{
5020 struct vcpu_svm *svm = to_svm(vcpu);
5021
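	/*
	 * NMIs stay masked until the guest executes IRET, so intercept
	 * IRET to notice when the NMI window opens again.
	 */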
5022 if (masked) {
5023 svm->vcpu.arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01005024 set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005025 } else {
5026 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01005027 clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005028 }
5029}
5030
Gleb Natapov78646122009-03-23 12:12:11 +02005031static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
5032{
5033 struct vcpu_svm *svm = to_svm(vcpu);
5034 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005035 int ret;
5036
5037 if (!gif_set(svm) ||
5038 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
5039 return 0;
5040
Avi Kivityf6e78472010-08-02 15:30:20 +03005041 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005042
Joerg Roedel20307532010-11-29 17:51:48 +01005043 if (is_guest_mode(vcpu))
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005044 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5045
5046 return ret;
Gleb Natapov78646122009-03-23 12:12:11 +02005047}
5048
Jan Kiszkac9a79532014-03-07 20:03:15 +01005049static void enable_irq_window(struct kvm_vcpu *vcpu)
Gleb Natapov9222be12009-04-23 17:14:37 +03005050{
Alexander Graf219b65d2009-06-15 15:21:25 +02005051 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02005052
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05005053 if (kvm_vcpu_apicv_active(vcpu))
5054 return;
5055
Joerg Roedele0231712010-02-24 18:59:10 +01005056 /*
5057 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5058 * 1, because that's a separate STGI/VMRUN intercept. The next time we
5059	 * get that intercept, this function will be called again and then
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005060 * we'll get the vintr intercept. However, if the vGIF feature is
5061 * enabled, the STGI interception will not occur. Enable the irq
5062 * window under the assumption that the hardware will set the GIF.
Joerg Roedele0231712010-02-24 18:59:10 +01005063 */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005064 if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
Alexander Graf219b65d2009-06-15 15:21:25 +02005065 svm_set_vintr(svm);
5066 svm_inject_irq(svm, 0x0);
5067 }
Gleb Natapov9222be12009-04-23 17:14:37 +03005068}
5069
Jan Kiszkac9a79532014-03-07 20:03:15 +01005070static void enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005071{
Avi Kivity04d2cc72007-09-10 18:10:54 +03005072 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03005073
Gleb Natapov44c11432009-05-11 13:35:52 +03005074 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5075 == HF_NMI_MASK)
Jan Kiszkac9a79532014-03-07 20:03:15 +01005076 return; /* IRET will cause a vm exit */
Gleb Natapov44c11432009-05-11 13:35:52 +03005077
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005078 if (!gif_set(svm)) {
5079 if (vgif_enabled(svm))
5080 set_intercept(svm, INTERCEPT_STGI);
Ladi Prosek1a5e1852017-06-21 09:07:01 +02005081 return; /* STGI will cause a vm exit */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005082 }
Ladi Prosek1a5e1852017-06-21 09:07:01 +02005083
5084 if (svm->nested.exit_required)
5085 return; /* we're not going to run the guest yet */
5086
Joerg Roedele0231712010-02-24 18:59:10 +01005087 /*
5088	 * Something prevents the NMI from being injected. Single step over the
5089	 * possible problem (IRET or exception injection or interrupt shadow).
5090 */
Ladi Prosekab2f4d732017-06-21 09:06:58 +02005091 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
Jan Kiszka6be7d302009-10-18 13:24:54 +02005092 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03005093 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
Eddie Dong85f455f2007-07-06 12:20:49 +03005094}
5095
Izik Eiduscbc94022007-10-25 00:29:55 +02005096static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5097{
5098 return 0;
5099}
5100
Avi Kivityd9e368d2007-06-07 19:18:30 +03005101static void svm_flush_tlb(struct kvm_vcpu *vcpu)
5102{
Joerg Roedel38e5e922010-12-03 15:25:16 +01005103 struct vcpu_svm *svm = to_svm(vcpu);
5104
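	/*
	 * With FLUSHBYASID the hardware flushes only this guest's ASID on
	 * the next VMRUN; without it, retire the current ASID so that
	 * pre_svm_run() assigns a fresh one.
	 */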
5105 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5106 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5107 else
5108 svm->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03005109}
5110
Avi Kivity04d2cc72007-09-10 18:10:54 +03005111static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5112{
5113}
5114
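/*
 * Propagate the V_TPR value the guest updated in hardware back into the
 * emulated local APIC's TPR (CR8). Only needed when CR8 writes are not
 * intercepted.
 */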
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005115static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5116{
5117 struct vcpu_svm *svm = to_svm(vcpu);
5118
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05005119 if (svm_nested_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01005120 return;
5121
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01005122 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005123 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03005124 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005125 }
5126}
5127
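/*
 * Mirror the emulated local APIC's TPR (CR8) into the VMCB's V_TPR field
 * before entering the guest.
 */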
Joerg Roedel649d6862008-04-16 16:51:15 +02005128static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5129{
5130 struct vcpu_svm *svm = to_svm(vcpu);
5131 u64 cr8;
5132
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05005133 if (svm_nested_virtualize_tpr(vcpu) ||
5134 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01005135 return;
5136
Joerg Roedel649d6862008-04-16 16:51:15 +02005137 cr8 = kvm_get_cr8(vcpu);
5138 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5139 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5140}
5141
Gleb Natapov9222be12009-04-23 17:14:37 +03005142static void svm_complete_interrupts(struct vcpu_svm *svm)
5143{
5144 u8 vector;
5145 int type;
5146 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01005147 unsigned int3_injected = svm->int3_injected;
5148
5149 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03005150
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02005151 /*
5152 * If we've made progress since setting HF_IRET_MASK, we've
5153 * executed an IRET and can allow NMI injection.
5154 */
5155 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5156 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
Gleb Natapov44c11432009-05-11 13:35:52 +03005157 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
Avi Kivity3842d132010-07-27 12:30:24 +03005158 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5159 }
Gleb Natapov44c11432009-05-11 13:35:52 +03005160
Gleb Natapov9222be12009-04-23 17:14:37 +03005161 svm->vcpu.arch.nmi_injected = false;
5162 kvm_clear_exception_queue(&svm->vcpu);
5163 kvm_clear_interrupt_queue(&svm->vcpu);
5164
5165 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5166 return;
5167
Avi Kivity3842d132010-07-27 12:30:24 +03005168 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5169
Gleb Natapov9222be12009-04-23 17:14:37 +03005170 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5171 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5172
5173 switch (type) {
5174 case SVM_EXITINTINFO_TYPE_NMI:
5175 svm->vcpu.arch.nmi_injected = true;
5176 break;
5177 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01005178 /*
5179 * In case of software exceptions, do not reinject the vector,
5180 * but re-execute the instruction instead. Rewind RIP first
5181 * if we emulated INT3 before.
5182 */
5183 if (kvm_exception_is_soft(vector)) {
5184 if (vector == BP_VECTOR && int3_injected &&
5185 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5186 kvm_rip_write(&svm->vcpu,
5187 kvm_rip_read(&svm->vcpu) -
5188 int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02005189 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01005190 }
Gleb Natapov9222be12009-04-23 17:14:37 +03005191 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5192 u32 err = svm->vmcb->control.exit_int_info_err;
Joerg Roedelce7ddec2010-04-22 12:33:13 +02005193 kvm_requeue_exception_e(&svm->vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03005194
5195 } else
Joerg Roedelce7ddec2010-04-22 12:33:13 +02005196 kvm_requeue_exception(&svm->vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03005197 break;
5198 case SVM_EXITINTINFO_TYPE_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03005199 kvm_queue_interrupt(&svm->vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03005200 break;
5201 default:
5202 break;
5203 }
5204}
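
/*
 * Worked example for the INT3 rewind above: when KVM injects a software
 * #BP it records the emulated instruction length in svm->int3_injected
 * (1 for the cc opcode). If a vmexit interrupts delivery, RIP already
 * points past the breakpoint, so subtracting int3_injected lets the
 * guest re-execute "int3" instead of receiving a stale exception.
 */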
5205
Avi Kivityb463a6f2010-07-20 15:06:17 +03005206static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5207{
5208 struct vcpu_svm *svm = to_svm(vcpu);
5209 struct vmcb_control_area *control = &svm->vmcb->control;
5210
5211 control->exit_int_info = control->event_inj;
5212 control->exit_int_info_err = control->event_inj_err;
5213 control->event_inj = 0;
5214 svm_complete_interrupts(svm);
5215}
5216
Avi Kivity851ba692009-08-24 11:10:17 +03005217static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005218{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005219 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03005220
Joerg Roedel2041a062010-04-22 12:33:08 +02005221 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5222 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5223 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5224
Joerg Roedelcd3ff652009-10-09 16:08:26 +02005225 /*
5226 * A vmexit emulation is required before the vcpu can be executed
5227 * again.
5228 */
5229 if (unlikely(svm->nested.exit_required))
5230 return;
5231
Ladi Proseka12713c2017-06-21 09:07:00 +02005232 /*
5233 * Disable singlestep if we're injecting an interrupt/exception.
5234 * We don't want our modified rflags to be pushed on the stack where
5235 * we might not be able to easily reset them when we later disable
5236 * NMI singlestep.
5237 */
5238 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5239 /*
5240 * Event injection happens before external interrupts cause a
5241 * vmexit and interrupts are disabled here, so smp_send_reschedule
5242 * is enough to force an immediate vmexit.
5243 */
5244 disable_nmi_singlestep(svm);
5245 smp_send_reschedule(vcpu->cpu);
5246 }
5247
Rusty Russelle756fc62007-07-30 20:07:08 +10005248 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005249
Joerg Roedel649d6862008-04-16 16:51:15 +02005250 sync_lapic_to_cr8(vcpu);
5251
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02005252 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005253
Avi Kivity04d2cc72007-09-10 18:10:54 +03005254 clgi();
5255
5256 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08005257
Avi Kivity6aa8b732006-12-10 02:21:36 -08005258 asm volatile (
Avi Kivity74547662012-09-16 15:10:59 +03005259 "push %%" _ASM_BP "; \n\t"
5260 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5261 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5262 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5263 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5264 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5265 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005266#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005267 "mov %c[r8](%[svm]), %%r8 \n\t"
5268 "mov %c[r9](%[svm]), %%r9 \n\t"
5269 "mov %c[r10](%[svm]), %%r10 \n\t"
5270 "mov %c[r11](%[svm]), %%r11 \n\t"
5271 "mov %c[r12](%[svm]), %%r12 \n\t"
5272 "mov %c[r13](%[svm]), %%r13 \n\t"
5273 "mov %c[r14](%[svm]), %%r14 \n\t"
5274 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005275#endif
5276
Avi Kivity6aa8b732006-12-10 02:21:36 -08005277 /* Enter guest mode */
Avi Kivity74547662012-09-16 15:10:59 +03005278 "push %%" _ASM_AX " \n\t"
5279 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03005280 __ex(SVM_VMLOAD) "\n\t"
5281 __ex(SVM_VMRUN) "\n\t"
5282 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity74547662012-09-16 15:10:59 +03005283 "pop %%" _ASM_AX " \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005284
5285 /* Save guest registers, load host registers */
Avi Kivity74547662012-09-16 15:10:59 +03005286 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5287 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5288 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5289 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5290 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5291 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005292#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005293 "mov %%r8, %c[r8](%[svm]) \n\t"
5294 "mov %%r9, %c[r9](%[svm]) \n\t"
5295 "mov %%r10, %c[r10](%[svm]) \n\t"
5296 "mov %%r11, %c[r11](%[svm]) \n\t"
5297 "mov %%r12, %c[r12](%[svm]) \n\t"
5298 "mov %%r13, %c[r13](%[svm]) \n\t"
5299 "mov %%r14, %c[r14](%[svm]) \n\t"
5300 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005301#endif
Avi Kivity74547662012-09-16 15:10:59 +03005302 "pop %%" _ASM_BP
Avi Kivity6aa8b732006-12-10 02:21:36 -08005303 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005304 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08005305 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005306 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5307 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5308 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5309 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5310 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5311 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005312#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005313 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5314 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5315 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5316 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5317 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5318 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5319 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5320 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08005321#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02005322 : "cc", "memory"
5323#ifdef CONFIG_X86_64
Avi Kivity74547662012-09-16 15:10:59 +03005324 , "rbx", "rcx", "rdx", "rsi", "rdi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02005325 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
Avi Kivity74547662012-09-16 15:10:59 +03005326#else
5327 , "ebx", "ecx", "edx", "esi", "edi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02005328#endif
5329 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08005330
Avi Kivity82ca2d12010-10-21 12:20:34 +02005331#ifdef CONFIG_X86_64
5332 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5333#else
Avi Kivitydacccfd2010-10-21 12:20:33 +02005334 loadsegment(fs, svm->host.fs);
Avi Kivity831ca602011-03-08 16:09:51 +02005335#ifndef CONFIG_X86_32_LAZY_GS
5336 loadsegment(gs, svm->host.gs);
5337#endif
Avi Kivity9581d442010-10-19 16:46:55 +02005338#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08005339
5340 reload_tss(vcpu);
5341
Avi Kivity56ba47d2007-11-07 17:14:18 +02005342 local_irq_disable();
5343
Avi Kivity13c34e02010-10-21 12:20:31 +02005344 vcpu->arch.cr2 = svm->vmcb->save.cr2;
5345 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5346 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5347 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5348
Joerg Roedel3781c012011-01-14 16:45:02 +01005349 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5350 kvm_before_handle_nmi(&svm->vcpu);
5351
5352 stgi();
5353
5354 /* Any pending NMI will happen here */
5355
5356 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5357 kvm_after_handle_nmi(&svm->vcpu);
5358
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005359 sync_cr8_to_lapic(vcpu);
5360
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005361 svm->next_rip = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03005362
Joerg Roedel38e5e922010-12-03 15:25:16 +01005363 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5364
Gleb Natapov631bc482010-10-14 11:22:52 +02005365 /* if exit due to PF check for async PF */
5366 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
Wanpeng Li1261bfa2017-07-13 18:30:40 -07005367 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
Gleb Natapov631bc482010-10-14 11:22:52 +02005368
Avi Kivity6de4f3a2009-05-31 22:58:47 +03005369 if (npt_enabled) {
5370 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5371 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5372 }
Joerg Roedelfe5913e2010-05-17 14:43:34 +02005373
5374 /*
5375 * We need to handle MC intercepts here before the vcpu has a chance to
5376 * change the physical cpu
5377 */
5378 if (unlikely(svm->vmcb->control.exit_code ==
5379 SVM_EXIT_EXCP_BASE + MC_VECTOR))
5380 svm_handle_mce(svm);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01005381
5382 mark_all_clean(svm->vmcb);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005383}
Josh Poimboeufc207aee2017-06-28 10:11:06 -05005384STACK_FRAME_NON_STANDARD(svm_vcpu_run);
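
/*
 * A note on the VMLOAD/VMRUN/VMSAVE triple in the asm block above:
 * VMLOAD pulls the guest's FS/GS/TR/LDTR and SYSCALL/SYSENTER MSR state
 * out of the VMCB right before entry, VMRUN enters the guest, and VMSAVE
 * writes that extra state back to the VMCB on exit. State not covered by
 * VMSAVE (e.g. the host GS base on x86-64) is restored by hand right
 * after the asm block.
 */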
Avi Kivity6aa8b732006-12-10 02:21:36 -08005385
Avi Kivity6aa8b732006-12-10 02:21:36 -08005386static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5387{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005388 struct vcpu_svm *svm = to_svm(vcpu);
5389
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05005390 svm->vmcb->save.cr3 = __sme_set(root);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01005391 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01005392 svm_flush_tlb(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005393}
5394
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005395static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5396{
5397 struct vcpu_svm *svm = to_svm(vcpu);
5398
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05005399 svm->vmcb->control.nested_cr3 = __sme_set(root);
Joerg Roedelb2747162010-12-03 11:45:53 +01005400 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005401
5402 /* Also sync guest cr3 here in case we live migrate */
Avi Kivity9f8fe502010-12-05 17:30:00 +02005403 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01005404 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005405
Joerg Roedelf40f6a42010-12-03 15:25:15 +01005406 svm_flush_tlb(vcpu);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005407}
5408
Avi Kivity6aa8b732006-12-10 02:21:36 -08005409static int is_disabled(void)
5410{
Joerg Roedel6031a612007-06-22 12:29:50 +03005411 u64 vm_cr;
5412
5413 rdmsrl(MSR_VM_CR, vm_cr);
5414 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5415 return 1;
5416
Avi Kivity6aa8b732006-12-10 02:21:36 -08005417 return 0;
5418}
5419
Ingo Molnar102d8322007-02-19 14:37:47 +02005420static void
5421svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5422{
5423 /*
5424 * Patch in the VMMCALL instruction:
5425 */
5426 hypercall[0] = 0x0f;
5427 hypercall[1] = 0x01;
5428 hypercall[2] = 0xd9;
Ingo Molnar102d8322007-02-19 14:37:47 +02005429}
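
/*
 * Illustration only: 0f 01 d9 is the VMMCALL opcode. A guest following
 * the KVM hypercall convention (nr in RAX, return value in RAX) would
 * reach the patched sequence with something like the hypothetical helper
 * below; it is not part of this file.
 *
 *	static inline long guest_hypercall0(unsigned int nr)
 *	{
 *		long ret;
 *
 *		asm volatile("vmmcall" : "=a"(ret) : "a"(nr) : "memory");
 *		return ret;
 *	}
 */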
5430
Yang, Sheng002c7f72007-07-31 14:23:01 +03005431static void svm_check_processor_compat(void *rtn)
5432{
5433 *(int *)rtn = 0;
5434}
5435
Avi Kivity774ead32007-12-26 13:57:04 +02005436static bool svm_cpu_has_accelerated_tpr(void)
5437{
5438 return false;
5439}
5440
Paolo Bonzini6d396b52015-04-01 14:25:33 +02005441static bool svm_has_high_real_mode_segbase(void)
5442{
5443 return true;
5444}
5445
Paolo Bonzinifc07e762015-10-01 13:20:22 +02005446static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5447{
5448 return 0;
5449}
5450
Sheng Yang0e851882009-12-18 16:48:46 +08005451static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5452{
Joerg Roedel6092d3d2015-10-14 15:10:54 +02005453 struct vcpu_svm *svm = to_svm(vcpu);
5454
5455 /* Update nrips enabled cache */
Radim Krčmářd6321d42017-08-05 00:12:49 +02005456 svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05005457
5458 if (!kvm_vcpu_apicv_active(vcpu))
5459 return;
5460
Radim Krčmář1b4d56b2017-08-05 00:12:50 +02005461 guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
Sheng Yang0e851882009-12-18 16:48:46 +08005462}
5463
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005464static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5465{
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005466 switch (func) {
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05005467 case 0x1:
5468 if (avic)
5469 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5470 break;
Joerg Roedel4c62a2d2010-09-10 17:31:06 +02005471 case 0x80000001:
5472 if (nested)
5473 entry->ecx |= (1 << 2); /* Set SVM bit */
5474 break;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005475 case 0x8000000A:
5476 entry->eax = 1; /* SVM revision 1 */
5477 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5478 ASID emulation to nested SVM */
5479 entry->ecx = 0; /* Reserved */
Joerg Roedel7a190662010-07-27 18:14:21 +02005480 entry->edx = 0; /* By default, do not support any
5481 additional features */
5482
5483 /* Support next_rip if host supports it */
Avi Kivity2a6b20b2010-11-09 16:15:42 +02005484 if (boot_cpu_has(X86_FEATURE_NRIPS))
Joerg Roedel7a190662010-07-27 18:14:21 +02005485 entry->edx |= SVM_FEATURE_NRIP;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005486
Joerg Roedel3d4aeaa2010-09-10 17:31:05 +02005487 /* Support NPT for the guest if enabled */
5488 if (npt_enabled)
5489 entry->edx |= SVM_FEATURE_NPT;
5490
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005491 break;
Brijesh Singh8765d752017-12-04 10:57:25 -06005492 case 0x8000001F:
5493 /* Support memory encryption cpuid if host supports it */
5494 if (boot_cpu_has(X86_FEATURE_SEV))
5495 cpuid(0x8000001f, &entry->eax, &entry->ebx,
5496 &entry->ecx, &entry->edx);
5497
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005498 }
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005499}
5500
Sheng Yang17cc3932010-01-05 19:02:27 +08005501static int svm_get_lpage_level(void)
Joerg Roedel344f4142009-07-27 16:30:48 +02005502{
Sheng Yang17cc3932010-01-05 19:02:27 +08005503 return PT_PDPE_LEVEL;
Joerg Roedel344f4142009-07-27 16:30:48 +02005504}
5505
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005506static bool svm_rdtscp_supported(void)
5507{
Paolo Bonzini46896c72015-11-12 14:49:16 +01005508 return boot_cpu_has(X86_FEATURE_RDTSCP);
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005509}
5510
Mao, Junjiead756a12012-07-02 01:18:48 +00005511static bool svm_invpcid_supported(void)
5512{
5513 return false;
5514}
5515
Paolo Bonzini93c4adc2014-03-05 23:19:52 +01005516static bool svm_mpx_supported(void)
5517{
5518 return false;
5519}
5520
Wanpeng Li55412b22014-12-02 19:21:30 +08005521static bool svm_xsaves_supported(void)
5522{
5523 return false;
5524}
5525
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005526static bool svm_has_wbinvd_exit(void)
5527{
5528 return true;
5529}
5530
Joerg Roedel80612522011-04-04 12:39:33 +02005531#define PRE_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005532 .stage = X86_ICPT_PRE_EXCEPT, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005533#define POST_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005534 .stage = X86_ICPT_POST_EXCEPT, }
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005535#define POST_MEM(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005536 .stage = X86_ICPT_POST_MEMACCESS, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005537
Mathias Krause09941fb2012-08-30 01:30:20 +02005538static const struct __x86_intercept {
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005539 u32 exit_code;
5540 enum x86_intercept_stage stage;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005541} x86_intercept_map[] = {
5542 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
5543 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
5544 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
5545 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
5546 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
Joerg Roedel3b88e412011-04-04 12:39:29 +02005547 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
5548 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02005549 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
5550 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
5551 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
5552 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
5553 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
5554 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
5555 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
5556 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
Joerg Roedel01de8b02011-04-04 12:39:31 +02005557 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
5558 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
5559 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
5560 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
5561 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
5562 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
5563 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
5564 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005565 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
5566 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
5567 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
Joerg Roedel80612522011-04-04 12:39:33 +02005568 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
5569 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
5570 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
5571 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
5572 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
5573 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
5574 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
5575 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
5576 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
Joerg Roedelbf608f82011-04-04 12:39:34 +02005577 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
5578 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
5579 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
5580 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
5581 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
5582 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
5583 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
Joerg Roedelf6511932011-04-04 12:39:35 +02005584 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
5585 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
5586 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
5587 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005588};
5589
Joerg Roedel80612522011-04-04 12:39:33 +02005590#undef PRE_EX
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005591#undef POST_EX
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005592#undef POST_MEM
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005593
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005594static int svm_check_intercept(struct kvm_vcpu *vcpu,
5595 struct x86_instruction_info *info,
5596 enum x86_intercept_stage stage)
5597{
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005598 struct vcpu_svm *svm = to_svm(vcpu);
5599 int vmexit, ret = X86EMUL_CONTINUE;
5600 struct __x86_intercept icpt_info;
5601 struct vmcb *vmcb = svm->vmcb;
5602
5603 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
5604 goto out;
5605
5606 icpt_info = x86_intercept_map[info->intercept];
5607
Avi Kivity40e19b52011-04-21 12:35:41 +03005608 if (stage != icpt_info.stage)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005609 goto out;
5610
5611 switch (icpt_info.exit_code) {
5612 case SVM_EXIT_READ_CR0:
5613 if (info->intercept == x86_intercept_cr_read)
5614 icpt_info.exit_code += info->modrm_reg;
5615 break;
5616 case SVM_EXIT_WRITE_CR0: {
5617 unsigned long cr0, val;
5618 u64 intercept;
5619
5620 if (info->intercept == x86_intercept_cr_write)
5621 icpt_info.exit_code += info->modrm_reg;
5622
Jan Kiszka62baf442014-06-29 21:55:53 +02005623 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
5624 info->intercept == x86_intercept_clts)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005625 break;
5626
5627 intercept = svm->nested.intercept;
5628
5629 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
5630 break;
5631
5632 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
5633 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
5634
5635 if (info->intercept == x86_intercept_lmsw) {
5636 cr0 &= 0xfUL;
5637 val &= 0xfUL;
5638 /* lmsw can't clear PE - catch this here */
5639 if (cr0 & X86_CR0_PE)
5640 val |= X86_CR0_PE;
5641 }
5642
5643 if (cr0 ^ val)
5644 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
5645
5646 break;
5647 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02005648 case SVM_EXIT_READ_DR0:
5649 case SVM_EXIT_WRITE_DR0:
5650 icpt_info.exit_code += info->modrm_reg;
5651 break;
Joerg Roedel80612522011-04-04 12:39:33 +02005652 case SVM_EXIT_MSR:
5653 if (info->intercept == x86_intercept_wrmsr)
5654 vmcb->control.exit_info_1 = 1;
5655 else
5656 vmcb->control.exit_info_1 = 0;
5657 break;
Joerg Roedelbf608f82011-04-04 12:39:34 +02005658 case SVM_EXIT_PAUSE:
5659 /*
5660 * We reach this for plain NOP too, but PAUSE is encoded
5661 * as REP NOP, so only match when the REP prefix is present
5662 */
5663 if (info->rep_prefix != REPE_PREFIX)
5664 goto out;
Jan H. Schönherr49a8afc2017-09-05 23:58:44 +02005665 break;
Joerg Roedelf6511932011-04-04 12:39:35 +02005666 case SVM_EXIT_IOIO: {
5667 u64 exit_info;
5668 u32 bytes;
5669
Joerg Roedelf6511932011-04-04 12:39:35 +02005670 if (info->intercept == x86_intercept_in ||
5671 info->intercept == x86_intercept_ins) {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02005672 exit_info = ((info->src_val & 0xffff) << 16) |
5673 SVM_IOIO_TYPE_MASK;
Joerg Roedelf6511932011-04-04 12:39:35 +02005674 bytes = info->dst_bytes;
Jan Kiszka6493f152014-06-30 11:07:05 +02005675 } else {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02005676 exit_info = (info->dst_val & 0xffff) << 16;
Jan Kiszka6493f152014-06-30 11:07:05 +02005677 bytes = info->src_bytes;
Joerg Roedelf6511932011-04-04 12:39:35 +02005678 }
5679
5680 if (info->intercept == x86_intercept_outs ||
5681 info->intercept == x86_intercept_ins)
5682 exit_info |= SVM_IOIO_STR_MASK;
5683
5684 if (info->rep_prefix)
5685 exit_info |= SVM_IOIO_REP_MASK;
5686
5687 bytes = min(bytes, 4u);
5688
5689 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
5690
5691 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
5692
5693 vmcb->control.exit_info_1 = exit_info;
5694 vmcb->control.exit_info_2 = info->next_rip;
5695
5696 break;
5697 }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005698 default:
5699 break;
5700 }
5701
Bandan Dasf1047652015-06-11 02:05:33 -04005702 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
5703 if (static_cpu_has(X86_FEATURE_NRIPS))
5704 vmcb->control.next_rip = info->next_rip;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005705 vmcb->control.exit_code = icpt_info.exit_code;
5706 vmexit = nested_svm_exit_handled(svm);
5707
5708 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
5709 : X86EMUL_CONTINUE;
5710
5711out:
5712 return ret;
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005713}
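
/*
 * Worked example of the IOIO encoding above: for a "rep outsb" to port
 * 0x3f8 with 32-bit addressing, exit_info_1 ends up as
 *
 *	(0x3f8 << 16) | SVM_IOIO_STR_MASK | SVM_IOIO_REP_MASK |
 *	(1 << SVM_IOIO_SIZE_SHIFT) | (4 << (SVM_IOIO_ASIZE_SHIFT - 1))
 *
 * i.e. the port number in bits 31:16 and the string/rep/size/address-size
 * flags in the low word, matching what the hardware would report.
 */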
5714
Yang Zhanga547c6d2013-04-11 19:25:10 +08005715static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
5716{
5717 local_irq_enable();
Paolo Bonzinif2485b32016-06-15 15:23:11 +02005718 /*
5719 * We must have an instruction with interrupts enabled, so
5720 * the timer interrupt isn't delayed by the interrupt shadow.
5721 */
5722 asm("nop");
5723 local_irq_disable();
Yang Zhanga547c6d2013-04-11 19:25:10 +08005724}
5725
Radim Krčmářae97a3b2014-08-21 18:08:06 +02005726static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
5727{
5728}
5729
Suravee Suthikulpanitbe8ca172016-05-04 14:09:49 -05005730static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
5731{
5732 if (avic_handle_apic_id_update(vcpu) != 0)
5733 return;
5734 if (avic_handle_dfr_update(vcpu) != 0)
5735 return;
5736 avic_handle_ldr_update(vcpu);
5737}
5738
Borislav Petkov74f16902017-03-26 23:51:24 +02005739static void svm_setup_mce(struct kvm_vcpu *vcpu)
5740{
5741 /* [63:9] are reserved. */
5742 vcpu->arch.mcg_cap &= 0x1ff;
5743}
5744
Ladi Prosek72d7b372017-10-11 16:54:41 +02005745static int svm_smi_allowed(struct kvm_vcpu *vcpu)
5746{
Ladi Prosek05cade72017-10-11 16:54:45 +02005747 struct vcpu_svm *svm = to_svm(vcpu);
5748
5749 /* Per APM Vol.2 15.22.2 "Response to SMI" */
5750 if (!gif_set(svm))
5751 return 0;
5752
5753 if (is_guest_mode(&svm->vcpu) &&
5754 svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
5755 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
5756 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
5757 svm->nested.exit_required = true;
5758 return 0;
5759 }
5760
Ladi Prosek72d7b372017-10-11 16:54:41 +02005761 return 1;
5762}
5763
Ladi Prosek0234bf82017-10-11 16:54:40 +02005764static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
5765{
Ladi Prosek05cade72017-10-11 16:54:45 +02005766 struct vcpu_svm *svm = to_svm(vcpu);
5767 int ret;
5768
5769 if (is_guest_mode(vcpu)) {
5770 /* FED8h - SVM Guest */
5771 put_smstate(u64, smstate, 0x7ed8, 1);
5772 /* FEE0h - SVM Guest VMCB Physical Address */
5773 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
5774
5775 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5776 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5777 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5778
5779 ret = nested_svm_vmexit(svm);
5780 if (ret)
5781 return ret;
5782 }
Ladi Prosek0234bf82017-10-11 16:54:40 +02005783 return 0;
5784}
5785
5786static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
5787{
Ladi Prosek05cade72017-10-11 16:54:45 +02005788 struct vcpu_svm *svm = to_svm(vcpu);
5789 struct vmcb *nested_vmcb;
5790 struct page *page;
5791 struct {
5792 u64 guest;
5793 u64 vmcb;
5794 } svm_state_save;
5795 int ret;
5796
5797 ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
5798 sizeof(svm_state_save));
5799 if (ret)
5800 return ret;
5801
5802 if (svm_state_save.guest) {
5803 vcpu->arch.hflags &= ~HF_SMM_MASK;
5804 nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
5805 if (nested_vmcb)
5806 enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
5807 else
5808 ret = 1;
5809 vcpu->arch.hflags |= HF_SMM_MASK;
5810 }
5811 return ret;
Ladi Prosek0234bf82017-10-11 16:54:40 +02005812}
5813
Ladi Prosekcc3d9672017-10-17 16:02:39 +02005814static int enable_smi_window(struct kvm_vcpu *vcpu)
5815{
5816 struct vcpu_svm *svm = to_svm(vcpu);
5817
5818 if (!gif_set(svm)) {
5819 if (vgif_enabled(svm))
5820 set_intercept(svm, INTERCEPT_STGI);
5821 /* STGI will cause a vm exit */
5822 return 1;
5823 }
5824 return 0;
5825}
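
/*
 * With vGIF, setting the STGI intercept above turns the guest's next
 * STGI into a vmexit; GIF becomes set at exactly that point, which is
 * when a pending SMI can finally be injected.
 */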
5826
Brijesh Singh1654efc2017-12-04 10:57:34 -06005827static int sev_asid_new(void)
5828{
5829 int pos;
5830
5831 /*
5832 * SEV-enabled guests must use an ASID in the range min_sev_asid to max_sev_asid.
5833 */
5834 pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
5835 if (pos >= max_sev_asid)
5836 return -EBUSY;
5837
5838 set_bit(pos, sev_asid_bitmap);
5839 return pos + 1;
5840}
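
/*
 * Example: with min_sev_asid == 1 and bit 0 already taken, the search
 * above starts at bit 0, finds bit 1 clear, sets it, and returns ASID 2
 * (bit positions are zero-based while SEV ASIDs start at 1).
 */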
5841
5842static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
5843{
5844 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5845 int asid, ret;
5846
5847 ret = -EBUSY;
5848 asid = sev_asid_new();
5849 if (asid < 0)
5850 return ret;
5851
5852 ret = sev_platform_init(&argp->error);
5853 if (ret)
5854 goto e_free;
5855
5856 sev->active = true;
5857 sev->asid = asid;
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06005858 INIT_LIST_HEAD(&sev->regions_list);
Brijesh Singh1654efc2017-12-04 10:57:34 -06005859
5860 return 0;
5861
5862e_free:
5863 __sev_asid_free(asid);
5864 return ret;
5865}
5866
Brijesh Singh59414c92017-12-04 10:57:35 -06005867static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
5868{
5869 struct sev_data_activate *data;
5870 int asid = sev_get_asid(kvm);
5871 int ret;
5872
5873 wbinvd_on_all_cpus();
5874
5875 ret = sev_guest_df_flush(error);
5876 if (ret)
5877 return ret;
5878
5879 data = kzalloc(sizeof(*data), GFP_KERNEL);
5880 if (!data)
5881 return -ENOMEM;
5882
5883 /* activate ASID on the given handle */
5884 data->handle = handle;
5885 data->asid = asid;
5886 ret = sev_guest_activate(data, error);
5887 kfree(data);
5888
5889 return ret;
5890}
5891
Brijesh Singh89c50582017-12-04 10:57:35 -06005892static int __sev_issue_cmd(int fd, int id, void *data, int *error)
Brijesh Singh59414c92017-12-04 10:57:35 -06005893{
5894 struct fd f;
5895 int ret;
5896
5897 f = fdget(fd);
5898 if (!f.file)
5899 return -EBADF;
5900
5901 ret = sev_issue_cmd_external_user(f.file, id, data, error);
5902
5903 fdput(f);
5904 return ret;
5905}
5906
Brijesh Singh89c50582017-12-04 10:57:35 -06005907static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
5908{
5909 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5910
5911 return __sev_issue_cmd(sev->fd, id, data, error);
5912}
5913
Brijesh Singh59414c92017-12-04 10:57:35 -06005914static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
5915{
5916 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5917 struct sev_data_launch_start *start;
5918 struct kvm_sev_launch_start params;
5919 void *dh_blob, *session_blob;
5920 int *error = &argp->error;
5921 int ret;
5922
5923 if (!sev_guest(kvm))
5924 return -ENOTTY;
5925
5926 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
5927 return -EFAULT;
5928
5929 start = kzalloc(sizeof(*start), GFP_KERNEL);
5930 if (!start)
5931 return -ENOMEM;
5932
5933 dh_blob = NULL;
5934 if (params.dh_uaddr) {
5935 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
5936 if (IS_ERR(dh_blob)) {
5937 ret = PTR_ERR(dh_blob);
5938 goto e_free;
5939 }
5940
5941 start->dh_cert_address = __sme_set(__pa(dh_blob));
5942 start->dh_cert_len = params.dh_len;
5943 }
5944
5945 session_blob = NULL;
5946 if (params.session_uaddr) {
5947 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
5948 if (IS_ERR(session_blob)) {
5949 ret = PTR_ERR(session_blob);
5950 goto e_free_dh;
5951 }
5952
5953 start->session_address = __sme_set(__pa(session_blob));
5954 start->session_len = params.session_len;
5955 }
5956
5957 start->handle = params.handle;
5958 start->policy = params.policy;
5959
5960 /* create memory encryption context */
Brijesh Singh89c50582017-12-04 10:57:35 -06005961 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
Brijesh Singh59414c92017-12-04 10:57:35 -06005962 if (ret)
5963 goto e_free_session;
5964
5965 /* Bind ASID to this guest */
5966 ret = sev_bind_asid(kvm, start->handle, error);
5967 if (ret)
5968 goto e_free_session;
5969
5970 /* return handle to userspace */
5971 params.handle = start->handle;
5972 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
5973 sev_unbind_asid(kvm, start->handle);
5974 ret = -EFAULT;
5975 goto e_free_session;
5976 }
5977
5978 sev->handle = start->handle;
5979 sev->fd = argp->sev_fd;
5980
5981e_free_session:
5982 kfree(session_blob);
5983e_free_dh:
5984 kfree(dh_blob);
5985e_free:
5986 kfree(start);
5987 return ret;
5988}
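
/*
 * For reference, argp->data points at a struct kvm_sev_launch_start.
 * A minimal caller relying on the defaults might pass (sketch only; all
 * blob pointers left zero so no DH certificate or session data is sent):
 *
 *	struct kvm_sev_launch_start ls = {
 *		.policy = 0,	// guest policy flags; 0 = least restrictive
 *	};
 *
 * On success the new guest handle is copied back into ls.handle.
 */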
5989
Brijesh Singh89c50582017-12-04 10:57:35 -06005990static int get_num_contig_pages(int idx, struct page **inpages,
5991 unsigned long npages)
5992{
5993 unsigned long paddr, next_paddr;
5994 int i = idx + 1, pages = 1;
5995
5996 /* find the number of contiguous pages starting from idx */
5997 paddr = __sme_page_pa(inpages[idx]);
5998 while (i < npages) {
5999 next_paddr = __sme_page_pa(inpages[i++]);
6000 if ((paddr + PAGE_SIZE) == next_paddr) {
6001 pages++;
6002 paddr = next_paddr;
6003 continue;
6004 }
6005 break;
6006 }
6007
6008 return pages;
6009}
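
/*
 * Example: if inpages[2], inpages[3] and inpages[4] map to physical
 * pages P, P + 1 and P + 2 while inpages[5] does not follow P + 2,
 * get_num_contig_pages(2, inpages, npages) returns 3.
 */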
6010
6011static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
6012{
6013 unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
6014 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6015 struct kvm_sev_launch_update_data params;
6016 struct sev_data_launch_update_data *data;
6017 struct page **inpages;
6018 int i, ret, pages;
6019
6020 if (!sev_guest(kvm))
6021 return -ENOTTY;
6022
6023 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6024 return -EFAULT;
6025
6026 data = kzalloc(sizeof(*data), GFP_KERNEL);
6027 if (!data)
6028 return -ENOMEM;
6029
6030 vaddr = params.uaddr;
6031 size = params.len;
6032 vaddr_end = vaddr + size;
6033
6034 /* Lock the user memory. */
6035 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
6036 if (!inpages) {
6037 ret = -ENOMEM;
6038 goto e_free;
6039 }
6040
6041 /*
6042 * The LAUNCH_UPDATE command will perform in-place encryption of the
6043 * memory content (i.e. it will write the same memory region with C=1).
6044 * It's possible that the cache may contain the data with C=0, i.e.
6045 * unencrypted, so invalidate it first.
6046 */
6047 sev_clflush_pages(inpages, npages);
6048
6049 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6050 int offset, len;
6051
6052 /*
6053 * If the user buffer is not page-aligned, calculate the offset
6054 * within the page.
6055 */
6056 offset = vaddr & (PAGE_SIZE - 1);
6057
6058 /* Calculate the number of pages that can be encrypted in one go. */
6059 pages = get_num_contig_pages(i, inpages, npages);
6060
6061 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6062
6063 data->handle = sev->handle;
6064 data->len = len;
6065 data->address = __sme_page_pa(inpages[i]) + offset;
6066 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6067 if (ret)
6068 goto e_unpin;
6069
6070 size -= len;
6071 next_vaddr = vaddr + len;
6072 }
6073
6074e_unpin:
6075 /* the memory content was updated, so mark the pages dirty */
6076 for (i = 0; i < npages; i++) {
6077 set_page_dirty_lock(inpages[i]);
6078 mark_page_accessed(inpages[i]);
6079 }
6080 /* unlock the user pages */
6081 sev_unpin_memory(kvm, inpages, npages);
6082e_free:
6083 kfree(data);
6084 return ret;
6085}
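
/*
 * Worked example for the loop above: with params.uaddr = base + 0x800,
 * params.len = 0x2000 and the first two pinned pages physically
 * contiguous, the first iteration encrypts len = min(2 * 4096 - 0x800,
 * 0x2000) = 0x1800 bytes starting at offset 0x800, and the second
 * iteration covers the remaining 0x800 bytes from the next page.
 */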
6086
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006087static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6088{
6089 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6090 struct sev_data_launch_measure *data;
6091 struct kvm_sev_launch_measure params;
6092 void *blob = NULL;
6093 int ret;
6094
6095 if (!sev_guest(kvm))
6096 return -ENOTTY;
6097
6098 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6099 return -EFAULT;
6100
6101 data = kzalloc(sizeof(*data), GFP_KERNEL);
6102 if (!data)
6103 return -ENOMEM;
6104
6105 /* User wants to query the blob length */
6106 if (!params.len)
6107 goto cmd;
6108
6109 if (params.uaddr) {
6110 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6111 ret = -EINVAL;
6112 goto e_free;
6113 }
6114
6115 if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
6116 ret = -EFAULT;
6117 goto e_free;
6118 }
6119
6120 ret = -ENOMEM;
6121 blob = kmalloc(params.len, GFP_KERNEL);
6122 if (!blob)
6123 goto e_free;
6124
6125 data->address = __psp_pa(blob);
6126 data->len = params.len;
6127 }
6128
6129cmd:
6130 data->handle = sev->handle;
6131 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6132
6133 /*
6134 * If we only queried the measurement length, the FW has filled in the expected size in data->len.
6135 */
6136 if (!params.len)
6137 goto done;
6138
6139 if (ret)
6140 goto e_free_blob;
6141
6142 if (blob) {
6143 if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
6144 ret = -EFAULT;
6145 }
6146
6147done:
6148 params.len = data->len;
6149 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6150 ret = -EFAULT;
6151e_free_blob:
6152 kfree(blob);
6153e_free:
6154 kfree(data);
6155 return ret;
6156}
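
/*
 * Typical usage of the above is a two-call pattern (sketch; cmd is a
 * struct kvm_sev_cmd with id = KVM_SEV_LAUNCH_MEASURE and data pointing
 * at m, and vm_fd is assumed to be the VM file descriptor):
 *
 *	struct kvm_sev_launch_measure m = { 0 };
 *
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);	// m.len == 0: query size
 *	m.uaddr = (__u64)(uintptr_t)malloc(m.len);	// hypothetical buffer
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);	// fetch the measurement
 */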
6157
Brijesh Singh5bdb0e22017-12-04 10:57:36 -06006158static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6159{
6160 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6161 struct sev_data_launch_finish *data;
6162 int ret;
6163
6164 if (!sev_guest(kvm))
6165 return -ENOTTY;
6166
6167 data = kzalloc(sizeof(*data), GFP_KERNEL);
6168 if (!data)
6169 return -ENOMEM;
6170
6171 data->handle = sev->handle;
6172 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6173
6174 kfree(data);
6175 return ret;
6176}
6177
Brijesh Singh255d9e72017-12-04 10:57:37 -06006178static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6179{
6180 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6181 struct kvm_sev_guest_status params;
6182 struct sev_data_guest_status *data;
6183 int ret;
6184
6185 if (!sev_guest(kvm))
6186 return -ENOTTY;
6187
6188 data = kzalloc(sizeof(*data), GFP_KERNEL);
6189 if (!data)
6190 return -ENOMEM;
6191
6192 data->handle = sev->handle;
6193 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6194 if (ret)
6195 goto e_free;
6196
6197 params.policy = data->policy;
6198 params.state = data->state;
6199 params.handle = data->handle;
6200
6201 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6202 ret = -EFAULT;
6203e_free:
6204 kfree(data);
6205 return ret;
6206}
6207
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006208static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
6209 unsigned long dst, int size,
6210 int *error, bool enc)
6211{
6212 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6213 struct sev_data_dbg *data;
6214 int ret;
6215
6216 data = kzalloc(sizeof(*data), GFP_KERNEL);
6217 if (!data)
6218 return -ENOMEM;
6219
6220 data->handle = sev->handle;
6221 data->dst_addr = dst;
6222 data->src_addr = src;
6223 data->len = size;
6224
6225 ret = sev_issue_cmd(kvm,
6226 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
6227 data, error);
6228 kfree(data);
6229 return ret;
6230}
6231
6232static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
6233 unsigned long dst_paddr, int sz, int *err)
6234{
6235 int offset;
6236
6237 /*
6238 * It's safe to read more than we are asked; the caller should ensure
6239 * that the destination has enough space.
6240 */
6241 src_paddr = round_down(src_paddr, 16);
6242 offset = src_paddr & 15;
6243 sz = round_up(sz + offset, 16);
6244
6245 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
6246}
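
/*
 * Worked example of the rounding above: src_paddr = 0x1003, sz = 20
 * becomes src_paddr = 0x1000, offset = 3, sz = round_up(23, 16) = 32,
 * so the firmware decrypts 32 aligned bytes and the caller later copies
 * out the 20 requested bytes starting at offset 3.
 */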
6247
6248static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
6249 unsigned long __user dst_uaddr,
6250 unsigned long dst_paddr,
6251 int size, int *err)
6252{
6253 struct page *tpage = NULL;
6254 int ret, offset;
6255
6256 /* if the inputs are not 16-byte aligned, use an intermediate buffer */
6257 if (!IS_ALIGNED(dst_paddr, 16) ||
6258 !IS_ALIGNED(paddr, 16) ||
6259 !IS_ALIGNED(size, 16)) {
6260 tpage = (void *)alloc_page(GFP_KERNEL);
6261 if (!tpage)
6262 return -ENOMEM;
6263
6264 dst_paddr = __sme_page_pa(tpage);
6265 }
6266
6267 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
6268 if (ret)
6269 goto e_free;
6270
6271 if (tpage) {
6272 offset = paddr & 15;
6273 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
6274 page_address(tpage) + offset, size))
6275 ret = -EFAULT;
6276 }
6277
6278e_free:
6279 if (tpage)
6280 __free_page(tpage);
6281
6282 return ret;
6283}
6284
Brijesh Singh7d1594f2017-12-04 10:57:37 -06006285static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
6286 unsigned long __user vaddr,
6287 unsigned long dst_paddr,
6288 unsigned long __user dst_vaddr,
6289 int size, int *error)
6290{
6291 struct page *src_tpage = NULL;
6292 struct page *dst_tpage = NULL;
6293 int ret, len = size;
6294
6295 /* If the source buffer is not 16-byte aligned, use an intermediate buffer */
6296 if (!IS_ALIGNED(vaddr, 16)) {
6297 src_tpage = alloc_page(GFP_KERNEL);
6298 if (!src_tpage)
6299 return -ENOMEM;
6300
6301 if (copy_from_user(page_address(src_tpage),
6302 (void __user *)(uintptr_t)vaddr, size)) {
6303 __free_page(src_tpage);
6304 return -EFAULT;
6305 }
6306
6307 paddr = __sme_page_pa(src_tpage);
6308 }
6309
6310 /*
6311 * If the destination buffer or length is not 16-byte aligned, do a read-modify-write:
6312 * - decrypt destination in an intermediate buffer
6313 * - copy the source buffer in an intermediate buffer
6314 * - use the intermediate buffer as source buffer
6315 */
6316 if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
6317 int dst_offset;
6318
6319 dst_tpage = alloc_page(GFP_KERNEL);
6320 if (!dst_tpage) {
6321 ret = -ENOMEM;
6322 goto e_free;
6323 }
6324
6325 ret = __sev_dbg_decrypt(kvm, dst_paddr,
6326 __sme_page_pa(dst_tpage), size, error);
6327 if (ret)
6328 goto e_free;
6329
6330 /*
6331 * If the source is a kernel buffer, use memcpy(); otherwise use
6332 * copy_from_user().
6333 */
6334 dst_offset = dst_paddr & 15;
6335
6336 if (src_tpage)
6337 memcpy(page_address(dst_tpage) + dst_offset,
6338 page_address(src_tpage), size);
6339 else {
6340 if (copy_from_user(page_address(dst_tpage) + dst_offset,
6341 (void __user *)(uintptr_t)vaddr, size)) {
6342 ret = -EFAULT;
6343 goto e_free;
6344 }
6345 }
6346
6347 paddr = __sme_page_pa(dst_tpage);
6348 dst_paddr = round_down(dst_paddr, 16);
6349 len = round_up(size, 16);
6350 }
6351
6352 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
6353
6354e_free:
6355 if (src_tpage)
6356 __free_page(src_tpage);
6357 if (dst_tpage)
6358 __free_page(dst_tpage);
6359 return ret;
6360}
6361
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006362static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6363{
6364 unsigned long vaddr, vaddr_end, next_vaddr;
6365 unsigned long dst_vaddr, dst_vaddr_end;
6366 struct page **src_p, **dst_p;
6367 struct kvm_sev_dbg debug;
6368 unsigned long n;
6369 int ret, size;
6370
6371 if (!sev_guest(kvm))
6372 return -ENOTTY;
6373
6374 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6375 return -EFAULT;
6376
6377 vaddr = debug.src_uaddr;
6378 size = debug.len;
6379 vaddr_end = vaddr + size;
6380 dst_vaddr = debug.dst_uaddr;
6381 dst_vaddr_end = dst_vaddr + size;
6382
6383 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6384 int len, s_off, d_off;
6385
6386 /* lock userspace source and destination page */
6387 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
6388 if (!src_p)
6389 return -EFAULT;
6390
6391 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
6392 if (!dst_p) {
6393 sev_unpin_memory(kvm, src_p, n);
6394 return -EFAULT;
6395 }
6396
6397 /*
6398 * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
6399 * memory content (i.e. they rewrite the same memory region in place).
6400 * It's possible that the cache may contain stale data for these pages,
6401 * so invalidate it first.
6402 */
6403 sev_clflush_pages(src_p, 1);
6404 sev_clflush_pages(dst_p, 1);
6405
6406 /*
6407 * Since the user buffers may not be page-aligned, calculate the
6408 * offset within the page.
6409 */
6410 s_off = vaddr & ~PAGE_MASK;
6411 d_off = dst_vaddr & ~PAGE_MASK;
6412 len = min_t(size_t, (PAGE_SIZE - s_off), size);
6413
Brijesh Singh7d1594f2017-12-04 10:57:37 -06006414 if (dec)
6415 ret = __sev_dbg_decrypt_user(kvm,
6416 __sme_page_pa(src_p[0]) + s_off,
6417 dst_vaddr,
6418 __sme_page_pa(dst_p[0]) + d_off,
6419 len, &argp->error);
6420 else
6421 ret = __sev_dbg_encrypt_user(kvm,
6422 __sme_page_pa(src_p[0]) + s_off,
6423 vaddr,
6424 __sme_page_pa(dst_p[0]) + d_off,
6425 dst_vaddr,
6426 len, &argp->error);
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006427
6428 sev_unpin_memory(kvm, src_p, 1);
6429 sev_unpin_memory(kvm, dst_p, 1);
6430
6431 if (ret)
6432 goto err;
6433
6434 next_vaddr = vaddr + len;
6435 dst_vaddr = dst_vaddr + len;
6436 size -= len;
6437 }
6438err:
6439 return ret;
6440}
6441
Brijesh Singh9f5b5b92017-12-04 10:57:38 -06006442static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
6443{
6444 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6445 struct sev_data_launch_secret *data;
6446 struct kvm_sev_launch_secret params;
6447 struct page **pages;
6448 void *blob, *hdr;
6449 unsigned long n;
6450 int ret;
6451
6452 if (!sev_guest(kvm))
6453 return -ENOTTY;
6454
6455 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6456 return -EFAULT;
6457
6458 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
6459 if (!pages)
6460 return -ENOMEM;
6461
6462 /*
6463 * The secret must be copied into a contiguous memory region; let's verify
6464 * that the userspace memory pages are contiguous before we issue the command.
6465 */
6466 if (get_num_contig_pages(0, pages, n) != n) {
6467 ret = -EINVAL;
6468 goto e_unpin_memory;
6469 }
6470
6471 ret = -ENOMEM;
6472 data = kzalloc(sizeof(*data), GFP_KERNEL);
6473 if (!data)
6474 goto e_unpin_memory;
6475
6476 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
6477 if (IS_ERR(blob)) {
6478 ret = PTR_ERR(blob);
6479 goto e_free;
6480 }
6481
6482 data->trans_address = __psp_pa(blob);
6483 data->trans_len = params.trans_len;
6484
6485 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
6486 if (IS_ERR(hdr)) {
6487 ret = PTR_ERR(hdr);
6488 goto e_free_blob;
6489 }
6490 data->hdr_address = __psp_pa(hdr);
6491 data->hdr_len = params.hdr_len;
6492
6493 data->handle = sev->handle;
6494 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
6495
6496 kfree(hdr);
6497
6498e_free_blob:
6499 kfree(blob);
6500e_free:
6501 kfree(data);
6502e_unpin_memory:
6503 sev_unpin_memory(kvm, pages, n);
6504 return ret;
6505}
6506
Brijesh Singh1654efc2017-12-04 10:57:34 -06006507static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
6508{
6509 struct kvm_sev_cmd sev_cmd;
6510 int r;
6511
6512 if (!svm_sev_enabled())
6513 return -ENOTTY;
6514
6515 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
6516 return -EFAULT;
6517
6518 mutex_lock(&kvm->lock);
6519
6520 switch (sev_cmd.id) {
6521 case KVM_SEV_INIT:
6522 r = sev_guest_init(kvm, &sev_cmd);
6523 break;
Brijesh Singh59414c92017-12-04 10:57:35 -06006524 case KVM_SEV_LAUNCH_START:
6525 r = sev_launch_start(kvm, &sev_cmd);
6526 break;
Brijesh Singh89c50582017-12-04 10:57:35 -06006527 case KVM_SEV_LAUNCH_UPDATE_DATA:
6528 r = sev_launch_update_data(kvm, &sev_cmd);
6529 break;
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006530 case KVM_SEV_LAUNCH_MEASURE:
6531 r = sev_launch_measure(kvm, &sev_cmd);
6532 break;
Brijesh Singh5bdb0e22017-12-04 10:57:36 -06006533 case KVM_SEV_LAUNCH_FINISH:
6534 r = sev_launch_finish(kvm, &sev_cmd);
6535 break;
Brijesh Singh255d9e72017-12-04 10:57:37 -06006536 case KVM_SEV_GUEST_STATUS:
6537 r = sev_guest_status(kvm, &sev_cmd);
6538 break;
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006539 case KVM_SEV_DBG_DECRYPT:
6540 r = sev_dbg_crypt(kvm, &sev_cmd, true);
6541 break;
Brijesh Singh7d1594f2017-12-04 10:57:37 -06006542 case KVM_SEV_DBG_ENCRYPT:
6543 r = sev_dbg_crypt(kvm, &sev_cmd, false);
6544 break;
Brijesh Singh9f5b5b92017-12-04 10:57:38 -06006545 case KVM_SEV_LAUNCH_SECRET:
6546 r = sev_launch_secret(kvm, &sev_cmd);
6547 break;
Brijesh Singh1654efc2017-12-04 10:57:34 -06006548 default:
6549 r = -EINVAL;
6550 goto out;
6551 }
6552
6553 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
6554 r = -EFAULT;
6555
6556out:
6557 mutex_unlock(&kvm->lock);
6558 return r;
6559}
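
/*
 * Userspace reaches this dispatcher through the KVM_MEMORY_ENCRYPT_OP
 * ioctl on the VM file descriptor. A minimal sketch (error handling
 * omitted; vm_fd and sev_fd are assumed to be open descriptors for the
 * VM and /dev/sev respectively):
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id = KVM_SEV_INIT,
 *		.sev_fd = sev_fd,
 *	};
 *
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *	// cmd.error now holds the SEV firmware error code, if any
 */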
6560
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06006561static int svm_register_enc_region(struct kvm *kvm,
6562 struct kvm_enc_region *range)
6563{
6564 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6565 struct enc_region *region;
6566 int ret = 0;
6567
6568 if (!sev_guest(kvm))
6569 return -ENOTTY;
6570
6571 region = kzalloc(sizeof(*region), GFP_KERNEL);
6572 if (!region)
6573 return -ENOMEM;
6574
6575 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
6576 if (!region->pages) {
6577 ret = -ENOMEM;
6578 goto e_free;
6579 }
6580
6581 /*
6582 * The guest may change the memory encryption attribute from C=0 -> C=1
6583 * or vice versa for this memory range. Let's make sure caches are
6584 * flushed to ensure that guest data gets written into memory with the
6585 * correct C-bit.
6586 */
6587 sev_clflush_pages(region->pages, region->npages);
6588
6589 region->uaddr = range->addr;
6590 region->size = range->size;
6591
6592 mutex_lock(&kvm->lock);
6593 list_add_tail(&region->list, &sev->regions_list);
6594 mutex_unlock(&kvm->lock);
6595
6596 return ret;
6597
6598e_free:
6599 kfree(region);
6600 return ret;
6601}
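
/*
 * Illustration: userspace registers the guest RAM backing before
 * launching an SEV guest, e.g. (sketch, error handling omitted;
 * guest_ram and guest_ram_size are hypothetical):
 *
 *	struct kvm_enc_region region = {
 *		.addr = (__u64)(uintptr_t)guest_ram,
 *		.size = guest_ram_size,
 *	};
 *
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
 */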
6602
6603static struct enc_region *
6604find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
6605{
6606 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6607 struct list_head *head = &sev->regions_list;
6608 struct enc_region *i;
6609
6610 list_for_each_entry(i, head, list) {
6611 if (i->uaddr == range->addr &&
6612 i->size == range->size)
6613 return i;
6614 }
6615
6616 return NULL;
6617}
6618
6620static int svm_unregister_enc_region(struct kvm *kvm,
6621 struct kvm_enc_region *range)
6622{
6623 struct enc_region *region;
6624 int ret;
6625
6626 mutex_lock(&kvm->lock);
6627
6628 if (!sev_guest(kvm)) {
6629 ret = -ENOTTY;
6630 goto failed;
6631 }
6632
6633 region = find_enc_region(kvm, range);
6634 if (!region) {
6635 ret = -EINVAL;
6636 goto failed;
6637 }
6638
6639 __unregister_enc_region_locked(kvm, region);
6640
6641 mutex_unlock(&kvm->lock);
6642 return 0;
6643
6644failed:
6645 mutex_unlock(&kvm->lock);
6646 return ret;
6647}
6648
Kees Cook404f6aa2016-08-08 16:29:06 -07006649static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08006650 .cpu_has_kvm_support = has_svm,
6651 .disabled_by_bios = is_disabled,
6652 .hardware_setup = svm_hardware_setup,
6653 .hardware_unsetup = svm_hardware_unsetup,
Yang, Sheng002c7f72007-07-31 14:23:01 +03006654 .check_processor_compatibility = svm_check_processor_compat,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006655 .hardware_enable = svm_hardware_enable,
6656 .hardware_disable = svm_hardware_disable,
Avi Kivity774ead32007-12-26 13:57:04 +02006657 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
Paolo Bonzini6d396b52015-04-01 14:25:33 +02006658 .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006659
6660 .vcpu_create = svm_create_vcpu,
6661 .vcpu_free = svm_free_vcpu,
Avi Kivity04d2cc72007-09-10 18:10:54 +03006662 .vcpu_reset = svm_vcpu_reset,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006663
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006664 .vm_init = avic_vm_init,
Brijesh Singh1654efc2017-12-04 10:57:34 -06006665 .vm_destroy = svm_vm_destroy,
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006666
Avi Kivity04d2cc72007-09-10 18:10:54 +03006667 .prepare_guest_switch = svm_prepare_guest_switch,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006668 .vcpu_load = svm_vcpu_load,
6669 .vcpu_put = svm_vcpu_put,
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05006670 .vcpu_blocking = svm_vcpu_blocking,
6671 .vcpu_unblocking = svm_vcpu_unblocking,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006672
Paolo Bonzinia96036b2015-11-10 11:55:36 +01006673 .update_bp_intercept = update_bp_intercept,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006674 .get_msr = svm_get_msr,
6675 .set_msr = svm_set_msr,
6676 .get_segment_base = svm_get_segment_base,
6677 .get_segment = svm_get_segment,
6678 .set_segment = svm_set_segment,
Izik Eidus2e4d2652008-03-24 19:38:34 +02006679 .get_cpl = svm_get_cpl,
Rusty Russell1747fb72007-09-06 01:21:32 +10006680 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
Avi Kivitye8467fd2009-12-29 18:43:06 +02006681 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
Avi Kivityaff48ba2010-12-05 18:56:11 +02006682 .decache_cr3 = svm_decache_cr3,
Anthony Liguori25c4c272007-04-27 09:29:21 +03006683 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006684 .set_cr0 = svm_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006685 .set_cr3 = svm_set_cr3,
6686 .set_cr4 = svm_set_cr4,
6687 .set_efer = svm_set_efer,
6688 .get_idt = svm_get_idt,
6689 .set_idt = svm_set_idt,
6690 .get_gdt = svm_get_gdt,
6691 .set_gdt = svm_set_gdt,
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01006692 .get_dr6 = svm_get_dr6,
6693 .set_dr6 = svm_set_dr6,
Gleb Natapov020df072010-04-13 10:05:23 +03006694 .set_dr7 = svm_set_dr7,
Paolo Bonzinifacb0132014-02-21 10:32:27 +01006695 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
Avi Kivity6de4f3a2009-05-31 22:58:47 +03006696 .cache_reg = svm_cache_reg,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006697 .get_rflags = svm_get_rflags,
6698 .set_rflags = svm_set_rflags,
Huaitong Hanbe94f6b2016-03-22 16:51:20 +08006699
Avi Kivity6aa8b732006-12-10 02:21:36 -08006700 .tlb_flush = svm_flush_tlb,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006701
Avi Kivity6aa8b732006-12-10 02:21:36 -08006702 .run = svm_vcpu_run,
Avi Kivity04d2cc72007-09-10 18:10:54 +03006703 .handle_exit = handle_exit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006704 .skip_emulated_instruction = skip_emulated_instruction,
Glauber Costa2809f5d2009-05-12 16:21:05 -04006705 .set_interrupt_shadow = svm_set_interrupt_shadow,
6706 .get_interrupt_shadow = svm_get_interrupt_shadow,
Ingo Molnar102d8322007-02-19 14:37:47 +02006707 .patch_hypercall = svm_patch_hypercall,
Eddie Dong2a8067f2007-08-06 16:29:07 +03006708 .set_irq = svm_set_irq,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006709 .set_nmi = svm_inject_nmi,
Avi Kivity298101d2007-11-25 13:41:11 +02006710 .queue_exception = svm_queue_exception,
Avi Kivityb463a6f2010-07-20 15:06:17 +03006711 .cancel_injection = svm_cancel_injection,
Gleb Natapov78646122009-03-23 12:12:11 +02006712 .interrupt_allowed = svm_interrupt_allowed,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006713 .nmi_allowed = svm_nmi_allowed,
Jan Kiszka3cfc3092009-11-12 01:04:25 +01006714 .get_nmi_mask = svm_get_nmi_mask,
6715 .set_nmi_mask = svm_set_nmi_mask,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006716 .enable_nmi_window = enable_nmi_window,
6717 .enable_irq_window = enable_irq_window,
6718 .update_cr8_intercept = update_cr8_intercept,
Yang Zhang8d146952013-01-25 10:18:50 +08006719 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
Andrey Smetanind62caab2015-11-10 15:36:33 +03006720 .get_enable_apicv = svm_get_enable_apicv,
6721 .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
Yang Zhangc7c9c562013-01-25 10:18:51 +08006722 .load_eoi_exitmap = svm_load_eoi_exitmap,
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006723 .hwapic_irr_update = svm_hwapic_irr_update,
6724 .hwapic_isr_update = svm_hwapic_isr_update,
Suravee Suthikulpanitbe8ca172016-05-04 14:09:49 -05006725 .apicv_post_state_restore = avic_post_state_restore,
Izik Eiduscbc94022007-10-25 00:29:55 +02006726
6727 .set_tss_addr = svm_set_tss_addr,
Sheng Yang67253af2008-04-25 10:20:22 +08006728 .get_tdp_level = get_npt_level,
Sheng Yang4b12f0d2009-04-27 20:35:42 +08006729 .get_mt_mask = svm_get_mt_mask,
Marcelo Tosatti229456f2009-06-17 09:22:14 -03006730
Avi Kivity586f9602010-11-18 13:09:54 +02006731 .get_exit_info = svm_get_exit_info,
Avi Kivity586f9602010-11-18 13:09:54 +02006732
Sheng Yang17cc3932010-01-05 19:02:27 +08006733 .get_lpage_level = svm_get_lpage_level,
Sheng Yang0e851882009-12-18 16:48:46 +08006734
6735 .cpuid_update = svm_cpuid_update,
Sheng Yang4e47c7a2009-12-18 16:48:47 +08006736
6737 .rdtscp_supported = svm_rdtscp_supported,
Mao, Junjiead756a12012-07-02 01:18:48 +00006738 .invpcid_supported = svm_invpcid_supported,
Paolo Bonzini93c4adc2014-03-05 23:19:52 +01006739 .mpx_supported = svm_mpx_supported,
Wanpeng Li55412b22014-12-02 19:21:30 +08006740 .xsaves_supported = svm_xsaves_supported,
Joerg Roedeld4330ef2010-04-22 12:33:11 +02006741
6742 .set_supported_cpuid = svm_set_supported_cpuid,
Sheng Yangf5f48ee2010-06-30 12:25:15 +08006743
6744 .has_wbinvd_exit = svm_has_wbinvd_exit,
Zachary Amsden99e3e302010-08-19 22:07:17 -10006745
6746 .write_tsc_offset = svm_write_tsc_offset,
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02006747
6748 .set_tdp_cr3 = set_tdp_cr3,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02006749
6750 .check_intercept = svm_check_intercept,
Yang Zhanga547c6d2013-04-11 19:25:10 +08006751 .handle_external_intr = svm_handle_external_intr,
Radim Krčmářae97a3b2014-08-21 18:08:06 +02006752
6753 .sched_in = svm_sched_in,
Wei Huang25462f72015-06-19 15:45:05 +02006754
6755 .pmu_ops = &amd_pmu_ops,
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05006756 .deliver_posted_interrupt = svm_deliver_avic_intr,
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05006757 .update_pi_irte = svm_update_pi_irte,
Borislav Petkov74f16902017-03-26 23:51:24 +02006758 .setup_mce = svm_setup_mce,
Ladi Prosek0234bf82017-10-11 16:54:40 +02006759
Ladi Prosek72d7b372017-10-11 16:54:41 +02006760 .smi_allowed = svm_smi_allowed,
Ladi Prosek0234bf82017-10-11 16:54:40 +02006761 .pre_enter_smm = svm_pre_enter_smm,
6762 .pre_leave_smm = svm_pre_leave_smm,
Ladi Prosekcc3d9672017-10-17 16:02:39 +02006763 .enable_smi_window = enable_smi_window,
Brijesh Singh1654efc2017-12-04 10:57:34 -06006764
6765 .mem_enc_op = svm_mem_enc_op,
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06006766 .mem_enc_reg_region = svm_register_enc_region,
6767 .mem_enc_unreg_region = svm_unregister_enc_region,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006768};
6769
6770static int __init svm_init(void)
6771{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08006772 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
Avi Kivity0ee75be2010-04-28 15:39:01 +03006773 __alignof__(struct vcpu_svm), THIS_MODULE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08006774}
6775
6776static void __exit svm_exit(void)
6777{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08006778 kvm_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08006779}
6780
6781module_init(svm_init)
6782module_exit(svm_exit)