/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif
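
/*
 * With PTTYPE == 64 the FNAME() macro above makes, for example,
 * FNAME(walk_addr) expand to paging64_walk_addr; with PTTYPE == 32 it
 * becomes paging32_walk_addr, so each inclusion of this file produces a
 * private copy of every function below.
 */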

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

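/*
 * Atomically update a guest pte through a transient kernel mapping of
 * its page table page.  Returns true if the pte changed under us (the
 * guest or another vcpu raced with the update), in which case the
 * caller restarts the walk from scratch.
 */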
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

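/*
 * Distill a guest pte's protection bits into ACC_* access rights.
 * Execute permission is assumed unless NX applies: shifting a 64-bit
 * pte right by PT64_NX_SHIFT lands the NX bit on bit 0, where it
 * clears ACC_EXEC_MASK.
 */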
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
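/*
 * Emulates the hardware walk: checks presence, reserved bits and access
 * rights at each level, sets accessed/dirty bits as hardware would, and
 * fills in *walker.  Returns 1 on success; returns 0 on failure with
 * walker->error_code holding the #PF error code to inject into the
 * guest.
 */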
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;
	int rsvd_fault = 0;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
		trace_kvm_mmu_paging_element(pte, walker->level);

		if (!is_present_gpte(pte))
			goto not_present;

		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
		if (rsvd_fault)
			goto access_error;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

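		/*
		 * Set the accessed bit the way hardware would: with a
		 * cmpxchg, restarting the whole walk if the gpte was
		 * modified concurrently.
		 */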
		if (!(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

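		/*
		 * The walk ends at a 4k pte, at a 2M/4M pde with the PS
		 * bit set (always valid for PAE/long mode, CR4.PSE
		 * required for 32-bit paging), or at a 1G pdpte with the
		 * PS bit set in long mode.
		 */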
		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
		    ((walker->level == PT_DIRECTORY_LEVEL) &&
		     (pte & PT_PAGE_SIZE_MASK) &&
		     (PTTYPE == 64 || is_pse(vcpu))) ||
		    ((walker->level == PT_PDPE_LEVEL) &&
		     (pte & PT_PAGE_SIZE_MASK) &&
		     is_long_mode(vcpu))) {
			int lvl = walker->level;

			walker->gfn = gpte_to_gfn_lvl(pte, lvl);
			walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
				       >> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);

			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

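	/*
	 * A write fault also sets the dirty bit in the final gpte,
	 * mirroring hardware behaviour; again the walk is retried if the
	 * gpte was modified concurrently.
	 */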
	if (write_fault && !is_dirty_gpte(pte)) {
		bool ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	trace_kvm_mmu_walker_error(walker->error_code);
	return 0;
}

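/*
 * Called when the guest writes into a shadowed guest page table, to
 * keep the corresponding shadow pte up to date.  The new mapping is
 * only installed if the pte-write path cached the matching gfn/pfn in
 * vcpu->arch.update_pte; otherwise the next fault will map it.
 */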
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_gpte(gpte))
			__set_spte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
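/*
 * Walks the shadow hierarchy in lockstep with the completed guest walk
 * in *gw, installing the leaf spte at hlevel and allocating missing
 * intermediate shadow pages on the way down.  For indirect levels the
 * guest pte is re-read and compared with the cached walk to detect
 * concurrent modification.  Returns the leaf spte, or NULL.
 */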
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep = NULL;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (iterator.level == hlevel) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, level,
				     gw->gfn, pfn, false);
			break;
		}

		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		if (level <= gw->level) {
			int delta = level - gw->level + 1;
			direct = 1;
			if (!is_dirty_gpte(gw->ptes[level - delta]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(gw->ptes[level - delta]);
			/* advance table_gfn when emulating 1gb pages with 4k */
			if (delta == 0)
				table_gfn += PT_INDEX(addr, level);
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &write_pt, pfn);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 sptep, *sptep, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

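/*
 * Emulate the guest's invlpg: zap the shadow pte mapping gva and flush
 * remote TLBs if needed, then, outside the mmu lock, re-read the guest
 * pte and write it back into the shadow tables if it is still present
 * and accessed.
 */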
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	pt_element_t gpte;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		/* FIXME: properly handle invlpg on large guest pages */
		if (level == PT_PAGE_TABLE_LEVEL ||
		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
			struct kvm_mmu_page *sp = page_header(__pa(sptep));

			pte_gpa = (sp->gfn << PAGE_SHIFT);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				need_flush = 1;
			}
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep))
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;
	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}
}

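/*
 * Translate a guest virtual address into a guest physical address by
 * walking the guest page tables without requesting any access rights.
 */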
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

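/*
 * Pre-fill a shadow page's sptes with nonpresent markers: entries whose
 * guest pte is present get shadow_trap_nonpresent_pte so a fault exits
 * to the mmu, while entries that are not present in the guest get
 * shadow_notrap_nonpresent_pte so the fault can be reflected straight
 * into the guest without a page table walk.
 */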
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_gpte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_gpte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			__set_spte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG