/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
#else
	#error Invalid PTTYPE value
#endif

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;			/* current level of the walk */
	gfn_t table_gfn;		/* guest frame number of the mapped table */
	pt_element_t *table;		/* kmapped guest page table page */
	pt_element_t *ptep;		/* guest pte where the walk ended */
	pt_element_t inherited_ar;	/* user/write bits ANDed down the walk */
};

/*
 * Fetch a guest pte for a guest virtual address
 */
static void FNAME(walk_addr)(struct guest_walker *walker,
			     struct kvm_vcpu *vcpu, gva_t addr)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;
	pt_element_t root;

	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	root = vcpu->cr3;
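	/*
	 * With PAE paging (a 32-bit guest using 64-bit ptes) the root of the
	 * guest hierarchy is one of the four PDPTRs cached in vcpu->pdptrs
	 * rather than a table addressed by cr3, so the in-memory walk starts
	 * one level further down.
	 */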
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		if (!(root & PT_PRESENT_MASK))
			return;
		--walker->level;
	}
#endif
	walker->table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	slot = gfn_to_memslot(vcpu->kvm, walker->table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
			*ptep |= PT_ACCESSED_MASK;

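		/*
		 * Stop the walk at a non-present pte, at the lowest level,
		 * or at a large-page directory entry (2MB/4MB page).
		 */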
		if (!is_present_pte(*ptep) ||
		    walker->level == PT_PAGE_TABLE_LEVEL ||
		    (walker->level == PT_DIRECTORY_LEVEL &&
		     (*ptep & PT_PAGE_SIZE_MASK) &&
		     (PTTYPE == 64 || is_pse(vcpu))))
			break;

		if (walker->level != 3 || is_long_mode(vcpu))
			walker->inherited_ar &= walker->table[index];
		walker->table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
	}
	walker->ptep = ptep;
}

static void FNAME(release_walker)(struct guest_walker *walker)
{
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
			   u64 *shadow_pte, u64 access_bits)
{
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pte;
	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
		       guest_pte & PT_DIRTY_MASK, access_bits);
}

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
			   u64 *shadow_pte, u64 access_bits,
			   int index)
{
	gpa_t gaddr;

	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pde;
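	/*
	 * A guest large page is shadowed with 4K ptes; 'index' selects which
	 * 4K chunk of the large page this shadow pte maps. With PSE-36, the
	 * high bits of the guest pde (PT32_DIR_PSE36_MASK) supply physical
	 * address bits above bit 31.
	 */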
	gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
	set_pte_common(vcpu, shadow_pte, gaddr,
		       guest_pde & PT_DIRTY_MASK, access_bits);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker)
{
	hpa_t shadow_addr;
	int level;
	u64 *prev_shadow_ent = NULL;
	pt_element_t *guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

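	/*
	 * Walk the shadow hierarchy towards the leaf, reusing existing
	 * shadow page tables and allocating any missing levels as we go.
	 */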
	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		u64 shadow_pte;

		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				return shadow_ent;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL) {

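			/*
			 * A guest large page is backed by individual 4K
			 * shadow ptes; mark the parent shadow pde
			 * (PT_SHADOW_PS_MARK) to record that it shadows a
			 * guest large page.
			 */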
			if (walker->level == PT_DIRECTORY_LEVEL) {
				if (prev_shadow_ent)
					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar,
					       PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
			} else {
				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar);
			}
			return shadow_ent;
		}

		shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
		if (!VALID_PAGE(shadow_addr))
			return ERR_PTR(-ENOMEM);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}
}

/*
 * The guest faulted for write. We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
			       u64 *shadow_ent,
			       struct guest_walker *walker,
			       gva_t addr,
			       int user)
{
	pt_element_t *guest_ent;
	int writable_shadow;
	gfn_t gfn;

	if (is_writeble_pte(*shadow_ent))
		return 0;

	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
	if (user) {
		/*
		 * User mode access. Fail if it's a kernel page or a read-only
		 * page.
		 */
		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
			return 0;
		ASSERT(*shadow_ent & PT_USER_MASK);
	} else
		/*
		 * Kernel mode access. Fail if it's a read-only page and
		 * supervisor write protection is enabled.
		 */
		if (!writable_shadow) {
			if (is_write_protection(vcpu))
				return 0;
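			/*
			 * CR0.WP is clear, so the kernel may write to this
			 * read-only guest page; drop the user bit so that
			 * user-mode accesses still fault and get re-checked.
			 */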
			*shadow_ent &= ~PT_USER_MASK;
		}

	guest_ent = walker->ptep;

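	/*
	 * The guest pte is no longer present; the shadow pte is stale, so
	 * zap it rather than making it writable.
	 */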
	if (!is_present_pte(*guest_ent)) {
		*shadow_ent = 0;
		return 0;
	}

	gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	mark_page_dirty(vcpu->kvm, gfn);
	*shadow_ent |= PT_WRITABLE_MASK;
	*guest_ent |= PT_DIRTY_MASK;
	rmap_add(vcpu->kvm, shadow_ent);

	return 1;
}

/*
 * Page fault handler. There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int pte_present = error_code & PFERR_PRESENT_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int fixed;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
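	/*
	 * If fetch() cannot allocate a shadow page table, flush the shadow
	 * mmu to free pages and retry the walk.
	 */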
	for (;;) {
		FNAME(walk_addr)(&walker, vcpu, addr);
		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
		if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
			nonpaging_flush(vcpu);
			FNAME(release_walker)(&walker);
			continue;
		}
		break;
	}

	/*
	 * The page is not mapped by the guest. Let the guest handle it.
	 */
	if (!shadow_pte) {
		inject_page_fault(vcpu, addr, error_code);
		FNAME(release_walker)(&walker);
		return 0;
	}

	/*
	 * Update the shadow pte.
	 */
	if (write_fault)
		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
					    user_fault);
	else
		fixed = fix_read_pf(shadow_pte);

	FNAME(release_walker)(&walker);

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte)) {
		if (may_access(*shadow_pte, write_fault, user_fault))
			return 1;
		pgprintk("%s: io work, no access\n", __FUNCTION__);
		inject_page_fault(vcpu, addr,
				  error_code | PFERR_PRESENT_MASK);
		return 0;
	}

	/*
	 * pte not present, guest page fault.
	 */
	if (pte_present && !fixed) {
		inject_page_fault(vcpu, addr, error_code);
		return 0;
	}

	++kvm_stat.pf_fixed;

	return 0;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	pt_element_t guest_pte;
	gpa_t gpa;

	FNAME(walk_addr)(&walker, vcpu, vaddr);
	guest_pte = *walker.ptep;
	FNAME(release_walker)(&walker);

	if (!is_present_pte(guest_pte))
		return UNMAPPED_GVA;

	if (walker.level == PT_DIRECTORY_LEVEL) {
		ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
		ASSERT(PTTYPE == 64 || is_pse(vcpu));

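		/*
		 * Large page: combine the directory base address with the
		 * offset within the large page taken from vaddr.
		 */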
		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
			(PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));

		if (PTTYPE == 32 && is_cpuid_PSE36())
			gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
				(32 - PT32_DIR_PSE36_SHIFT);
	} else {
		gpa = (guest_pte & PT_BASE_ADDR_MASK);
		gpa |= (vaddr & ~PAGE_MASK);
	}

	return gpa;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK