/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

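/*
 * Returns true if @msr is one of the MSRs handled here: the variable-range
 * base/mask pairs (0x200 .. 0x200 + 2 * KVM_NR_VAR_MTRR - 1), the
 * fixed-range MTRRs, MTRRdefType and IA32_PAT.
 */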
static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

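/*
 * Architecturally valid memory-type encodings: PAT allows UC(0), WC(1),
 * WT(4), WP(5), WB(6) and UC-(7); MTRRs allow UC(0), WC(1), WT(4), WP(5)
 * and WB(6).
 */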
static bool valid_pat_type(unsigned t)
{
        return t < 8 && (1 << t) & 0xf3;        /* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
        return t < 8 && (1 << t) & 0x73;        /* 0, 1, 4, 5, 6 */
}

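/*
 * Validate a guest write of @data to MTRR/PAT MSR @msr: type fields must use
 * valid encodings and reserved bits must be zero.  For the variable-range
 * MSRs, bits above the guest's MAXPHYADDR are reserved as well, and a
 * violation there injects #GP directly.
 */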
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        int i;
        u64 mask;

        if (!msr_mtrr_valid(msr))
                return false;

        if (msr == MSR_IA32_CR_PAT) {
                for (i = 0; i < 8; i++)
                        if (!valid_pat_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        } else if (msr == MSR_MTRRdefType) {
                if (data & ~0xcff)
                        return false;
                return valid_mtrr_type(data & 0xff);
        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
                for (i = 0; i < 8; i++)
                        if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        }

        /* variable MTRRs */
        WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

        mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
        if ((msr & 1) == 0) {
                /* MTRR base */
                if (!valid_mtrr_type(data & 0xff))
                        return false;
                mask |= 0xf00;
        } else
                /* MTRR mask */
                mask |= 0x7ff;
        if (data & mask) {
                kvm_inject_gp(vcpu, 0);
                return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

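/*
 * A write to an MTRR MSR may change the effective memory type of guest
 * pages.  This only matters when TDP is enabled and the VM has non-coherent
 * DMA (the TDP mappings then carry an MTRR-derived memory type), in which
 * case the affected GFN range is zapped so it is rebuilt with the new type.
 * In 'enabled', bit 1 is the MTRR enable (E) and bit 0 the fixed-range
 * enable (FE) bit from MTRRdefType.
 */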
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct mtrr_state_type *mtrr_state = &vcpu->arch.mtrr_state;
        unsigned char mtrr_enabled = mtrr_state->enabled;
        gfn_t start, end, mask;
        int index;
        bool is_fixed = true;

        if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
              !kvm_arch_has_noncoherent_dma(vcpu->kvm))
                return;

        if (!(mtrr_enabled & 0x2) && msr != MSR_MTRRdefType)
                return;

        switch (msr) {
        case MSR_MTRRfix64K_00000:
                start = 0x0;
                end = 0x80000;
                break;
        case MSR_MTRRfix16K_80000:
                start = 0x80000;
                end = 0xa0000;
                break;
        case MSR_MTRRfix16K_A0000:
                start = 0xa0000;
                end = 0xc0000;
                break;
        case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
                index = msr - MSR_MTRRfix4K_C0000;
                start = 0xc0000 + index * (32 << 10);
                end = start + (32 << 10);
                break;
        case MSR_MTRRdefType:
                is_fixed = false;
                start = 0x0;
                end = ~0ULL;
                break;
        default:
                /* variable range MTRRs. */
                is_fixed = false;
                index = (msr - 0x200) / 2;
                start = (((u64)mtrr_state->var_ranges[index].base_hi) << 32) +
                       (mtrr_state->var_ranges[index].base_lo & PAGE_MASK);
                mask = (((u64)mtrr_state->var_ranges[index].mask_hi) << 32) +
                      (mtrr_state->var_ranges[index].mask_lo & PAGE_MASK);
                mask |= ~0ULL << cpuid_maxphyaddr(vcpu);

                end = ((start & mask) | ~mask) + 1;
        }

        if (is_fixed && !(mtrr_enabled & 0x1))
                return;

        kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

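/*
 * Handle a guest WRMSR to an MTRR/PAT MSR.  The value is validated, stored
 * in the vcpu's mtrr_state (or vcpu->arch.pat), and update_mtrr() is called
 * to invalidate any mappings whose memory type may have changed.  Returns 0
 * on success and 1 if the value is rejected.
 */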
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!kvm_mtrr_valid(vcpu, msr, data))
                return 1;

        if (msr == MSR_MTRRdefType) {
                vcpu->arch.mtrr_state.def_type = data;
                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
        } else if (msr == MSR_MTRRfix64K_00000)
                p[0] = data;
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                p[1 + msr - MSR_MTRRfix16K_80000] = data;
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pt = data;
        }

        update_mtrr(vcpu, msr);
        return 0;
}

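/*
 * Handle a guest RDMSR of an MTRR/PAT MSR, including the read-only MTRRcap.
 * Returns 0 and fills *pdata on success, 1 if the MSR is not handled here.
 */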
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        /* MSR_MTRRcap is a readonly MSR. */
        if (msr == MSR_MTRRcap) {
                /*
                 * SMRR = 0
                 * WC = 1
                 * FIX = 1
                 * VCNT = KVM_NR_VAR_MTRR
                 */
                *pdata = 0x500 | KVM_NR_VAR_MTRR;
                return 0;
        }

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.def_type +
                         (vcpu->arch.mtrr_state.enabled << 10);
        else if (msr == MSR_MTRRfix64K_00000)
                *pdata = p[0];
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pdata = *pt;
        }

        return 0;
}

/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
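 *
 * Returns the MTRR memory type for [start, end), 0xFE when start and end
 * fall in different variable ranges (partial overlap), or the default type
 * when nothing matches; with MTRRs disabled it returns MTRR_TYPE_UNCACHABLE.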
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
                         u64 start, u64 end)
{
        u64 base, mask;
        u8 prev_match, curr_match;
        int i, num_var_ranges = KVM_NR_VAR_MTRR;

        /* MTRR is completely disabled, use UC for all of physical memory. */
        if (!(mtrr_state->enabled & 0x2))
                return MTRR_TYPE_UNCACHABLE;

        /* Make end inclusive instead of exclusive */
        end--;

        /* Look in fixed ranges. Just return the type as per start */
        if (mtrr_state->have_fixed && (mtrr_state->enabled & 0x1) &&
              (start < 0x100000)) {
                int idx;

                if (start < 0x80000) {
                        idx = 0;
                        idx += (start >> 16);
                        return mtrr_state->fixed_ranges[idx];
                } else if (start < 0xC0000) {
                        idx = 1 * 8;
                        idx += ((start - 0x80000) >> 14);
                        return mtrr_state->fixed_ranges[idx];
                } else if (start < 0x1000000) {
                        idx = 3 * 8;
                        idx += ((start - 0xC0000) >> 12);
                        return mtrr_state->fixed_ranges[idx];
                }
        }

        /*
         * Look in variable ranges
         * Look for multiple ranges matching this address and pick type
         * as per MTRR precedence
         */
        prev_match = 0xFF;
        for (i = 0; i < num_var_ranges; ++i) {
                unsigned short start_state, end_state;

                if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
                        continue;

                base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
                       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
                mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
                       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

                start_state = ((start & mask) == (base & mask));
                end_state = ((end & mask) == (base & mask));
                if (start_state != end_state)
                        return 0xFE;

                if ((start & mask) != (base & mask))
                        continue;

                curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
                if (prev_match == 0xFF) {
                        prev_match = curr_match;
                        continue;
                }

                if (prev_match == MTRR_TYPE_UNCACHABLE ||
                    curr_match == MTRR_TYPE_UNCACHABLE)
                        return MTRR_TYPE_UNCACHABLE;

                if ((prev_match == MTRR_TYPE_WRBACK &&
                     curr_match == MTRR_TYPE_WRTHROUGH) ||
                    (prev_match == MTRR_TYPE_WRTHROUGH &&
                     curr_match == MTRR_TYPE_WRBACK)) {
                        prev_match = MTRR_TYPE_WRTHROUGH;
                        curr_match = MTRR_TYPE_WRTHROUGH;
                }

                if (prev_match != curr_match)
                        return MTRR_TYPE_UNCACHABLE;
        }

        if (prev_match != 0xFF)
                return prev_match;

        return mtrr_state->def_type;
}

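/*
 * Return the guest MTRR memory type for the page at @gfn.  The sentinel
 * results 0xFE and 0xFF from get_mtrr_type() are folded to write-back.
 */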
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        u8 mtrr;

        mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
                             (gfn << PAGE_SHIFT) + PAGE_SIZE);
        if (mtrr == 0xfe || mtrr == 0xff)
                mtrr = MTRR_TYPE_WRBACK;
        return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);