/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

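/*
 * The helpers below are shared by both instantiations of this file, so
 * alias them to PTTYPE-specific names to avoid symbol clashes.
 */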
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

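/*
 * Atomically update a guest pte in place: map the guest page table page,
 * cmpxchg the entry, and release the page as dirty.  Returns true if the
 * cmpxchg lost the race, i.e. the entry no longer matched orig_pte.
 */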
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

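/* Derive the ACC_* permission bits implied by a guest pte. */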
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;
	int rsvd_fault = 0;

	pgprintk("%s: addr %lx\n", __func__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
		if (rsvd_fault)
			goto access_error;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	return 0;
}

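/*
 * Update a shadow pte in response to a guest pte write, using the gfn/pfn
 * that were cached in vcpu->arch.update_pte ahead of this call.
 */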
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage,
		     gpte_to_gfn(gpte), pfn, true);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep = NULL;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_pte(gw->ptes[gw->level - 1]))
		return NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (level == PT_PAGE_TABLE_LEVEL
		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, largepage,
				     gw->gfn, pfn, false);
			break;
		}

		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		if (level == PT_DIRECTORY_LEVEL
		    && gw->level == PT_DIRECTORY_LEVEL) {
			direct = 1;
			if (!is_dirty_pte(gw->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;
		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, pfn);

	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

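/*
 * Emulate invlpg: zap the shadow pte that maps @gva and, if the backing
 * guest pte still looks live (present and accessed), hand it to
 * kvm_mmu_pte_write() so the mapping can be re-established.
 */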
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	pt_element_t gpte;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		/* FIXME: properly handle invlpg on large guest pages */
		if (level == PT_PAGE_TABLE_LEVEL ||
		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
			struct kvm_mmu_page *sp = page_header(__pa(sptep));

			pte_gpa = (sp->gfn << PAGE_SHIFT);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				need_flush = 1;
			}
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep))
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;
	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

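/*
 * Pre-fill a shadow page with trap/notrap entries according to the
 * presence bits of the corresponding guest page table entries.
 */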
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_pte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_pte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			set_shadow_pte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_pte(gpte), 0, gfn,
			 spte_to_pfn(sp->spt[i]), true, false);
	}

	return !nr_present;
}

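/* Undo the per-PTTYPE macro definitions from the top of this file. */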
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG