/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

/*
 * Setting this variable to true enables Two-Dimensional Paging (TDP),
 * where the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)                                                       \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
#endif

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
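
/*
 * Worked example (added for illustration): with PAGE_SHIFT == 12,
 * PT64_LEVEL_SHIFT(1) == 12 and PT64_LEVEL_SHIFT(2) == 21, so each level
 * consumes PT64_LEVEL_BITS == 9 address bits and indexes a 512-entry table.
 * For address 0x00603000, PT64_INDEX(addr, 2) == 3 and
 * PT64_INDEX(addr, 1) == 3.
 */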

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
        shadow_trap_nonpresent_pte = trap_pte;
        shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
        shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
        return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
        return pte != shadow_trap_nonpresent_pte
                && pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
        return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
        return pte & shadow_dirty_mask;
}

static int is_rmap_pte(u64 pte)
{
        return is_shadow_present_pte(pte);
}

static pfn_t spte_to_pfn(u64 pte)
{
        return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
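
/*
 * Note (added for clarity): with PSE-36, a guest's 4MB pde stores the high
 * bits of the frame address (bit 32 and up) in pde bits starting at
 * PT32_DIR_PSE36_SHIFT. The shift above moves those bits into their
 * position within the returned gfn; this is a sketch of the intent, see
 * the PT32_DIR_PSE36_* definitions for the exact layout.
 */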

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
        set_64bit((unsigned long *)sptep, spte);
#else
        set_64bit((unsigned long long *)sptep, spte);
#endif
}
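
/*
 * Note (added for clarity): set_64bit() makes the spte update a single
 * atomic 64-bit store. On 32-bit hosts a plain assignment would be two
 * 32-bit stores, and the hardware page-table walker could observe a torn,
 * half-updated spte in between.
 */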

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
                                       int min)
{
        struct page *page;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                page = alloc_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                set_page_private(page, 0);
                cache->objects[cache->nobjs++] = page_address(page);
        }
        return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
                                   pte_chain_cache, 4);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
                                   rmap_desc_cache, 1);
        if (r)
                goto out;
        r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                   mmu_page_header_cache, 4);
out:
        return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
        mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
                                    size_t size)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        memset(p, 0, size);
        return p;
}
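
/*
 * Note (added for clarity): the topup/alloc split exists because most MMU
 * work runs under the mmu_lock spinlock, where sleeping GFP_KERNEL
 * allocations are not allowed. Callers fill the per-vcpu caches up front
 * via mmu_topup_memory_caches() and later allocate from them, so
 * mmu_memory_cache_alloc() itself never fails or sleeps.
 */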

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
        kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
        kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
        unsigned long idx;

        idx = (gfn / KVM_PAGES_PER_HPAGE) -
              (slot->base_gfn / KVM_PAGES_PER_HPAGE);
        return &slot->lpage_info[idx].write_count;
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
        int *write_count;

        write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
        *write_count += 1;
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
        int *write_count;

        write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
        *write_count -= 1;
        WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
        int *largepage_idx;

        if (slot) {
                largepage_idx = slot_largepage_idx(gfn, slot);
                return *largepage_idx;
        }

        return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
        struct vm_area_struct *vma;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return 0;

        vma = find_vma(current->mm, addr);
        if (vma && is_vm_hugetlb_page(vma))
                return 1;

        return 0;
}

static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
        struct kvm_memory_slot *slot;

        if (has_wrprotected_page(vcpu->kvm, large_gfn))
                return 0;

        if (!host_largepage_backed(vcpu->kvm, large_gfn))
                return 0;

        slot = gfn_to_memslot(vcpu->kvm, large_gfn);
        if (slot && slot->dirty_bitmap)
                return 0;

        return 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
        struct kvm_memory_slot *slot;
        unsigned long idx;

        slot = gfn_to_memslot(kvm, gfn);
        if (!lpage)
                return &slot->rmap[gfn - slot->base_gfn];

        idx = (gfn / KVM_PAGES_PER_HPAGE) -
              (slot->base_gfn / KVM_PAGES_PER_HPAGE);

        return &slot->lpage_info[idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
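/*
 * Worked example (added for illustration): with RMAP_EXT == 4, a gfn
 * referenced by six sptes is encoded as a two-element descriptor chain:
 *
 *      *rmapp = (unsigned long)desc0 | 1;
 *      desc0->shadow_ptes[0..3] hold the first four sptes, desc0->more == desc1;
 *      desc1->shadow_ptes[0..1] hold the remaining two, desc1->more == NULL.
 */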
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
        struct kvm_mmu_page *sp;
        struct kvm_rmap_desc *desc;
        unsigned long *rmapp;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        gfn = unalias_gfn(vcpu->kvm, gfn);
        sp = page_header(__pa(spte));
        sp->gfns[spte - sp->spt] = gfn;
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
        if (!*rmapp) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                *rmapp = (unsigned long)spte;
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
                desc->shadow_ptes[0] = (u64 *)*rmapp;
                desc->shadow_ptes[1] = spte;
                *rmapp = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                        desc = desc->more;
                if (desc->shadow_ptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
                }
                for (i = 0; desc->shadow_ptes[i]; ++i)
                        ;
                desc->shadow_ptes[i] = spte;
        }
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
{
        int j;

        for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
                ;
        desc->shadow_ptes[i] = desc->shadow_ptes[j];
        desc->shadow_ptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                *rmapp = (unsigned long)desc->shadow_ptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        *rmapp = (unsigned long)desc->more | 1;
        mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *sp;
        pfn_t pfn;
        unsigned long *rmapp;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        sp = page_header(__pa(spte));
        pfn = spte_to_pfn(*spte);
        if (*spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (is_writeble_pte(*spte))
                kvm_release_pfn_dirty(pfn);
        else
                kvm_release_pfn_clean(pfn);
        rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
                if ((u64 *)*rmapp != spte) {
                        printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
                *rmapp = 0;
        } else {
                rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                                if (desc->shadow_ptes[i] == spte) {
                                        rmap_desc_remove_entry(rmapp,
                                                               desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                BUG();
        }
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        u64 *prev_spte;
        int i;

        if (!*rmapp)
                return NULL;
        else if (!(*rmapp & 1)) {
                if (!spte)
                        return (u64 *)*rmapp;
                return NULL;
        }
        desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
        prev_desc = NULL;
        prev_spte = NULL;
        while (desc) {
                for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
                        if (prev_spte == spte)
                                return desc->shadow_ptes[i];
                        prev_spte = desc->shadow_ptes[i];
                }
                desc = desc->more;
        }
        return NULL;
}
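
/*
 * Usage sketch (added for illustration): rmap_next() is an iterator over
 * all sptes mapping a gfn; pass NULL to get the first spte and the
 * previous return value to get the next:
 *
 *      spte = rmap_next(kvm, rmapp, NULL);
 *      while (spte) {
 *              ... inspect or modify *spte ...
 *              spte = rmap_next(kvm, rmapp, spte);
 *      }
 */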

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
        unsigned long *rmapp;
        u64 *spte;
        int write_protected = 0;

        gfn = unalias_gfn(kvm, gfn);
        rmapp = gfn_to_rmap(kvm, gfn, 0);

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writeble_pte(*spte)) {
                        set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
        if (write_protected) {
                pfn_t pfn;

                spte = rmap_next(kvm, rmapp, NULL);
                pfn = spte_to_pfn(*spte);
                kvm_set_pfn_dirty(pfn);
        }

        /* check for huge page mappings */
        rmapp = gfn_to_rmap(kvm, gfn, 1);
        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
                pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
                if (is_writeble_pte(*spte)) {
                        rmap_remove(kvm, spte);
                        --kvm->stat.lpages;
                        set_shadow_pte(spte, shadow_trap_nonpresent_pte);
                        spte = NULL;
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
        }

        if (write_protected)
                kvm_flush_remote_tlbs(kvm);

        account_shadowed(kvm, gfn);
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
        u64 *spte;
        int need_tlb_flush = 0;

        while ((spte = rmap_next(kvm, rmapp, NULL))) {
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
                rmap_remove(kvm, spte);
                set_shadow_pte(spte, shadow_trap_nonpresent_pte);
                need_tlb_flush = 1;
        }
        return need_tlb_flush;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp))
{
        int i;
        int retval = 0;

        /*
         * If mmap_sem isn't taken, we can look at the memslots with only
         * the mmu_lock by skipping over the slots with userspace_addr == 0.
         */
        for (i = 0; i < kvm->nmemslots; i++) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];
                unsigned long start = memslot->userspace_addr;
                unsigned long end;

                /* mmu_lock protects userspace_addr */
                if (!start)
                        continue;

                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
                        retval |= handler(kvm, &memslot->rmap[gfn_offset]);
                        retval |= handler(kvm,
                                          &memslot->lpage_info[
                                                  gfn_offset /
                                                  KVM_PAGES_PER_HPAGE].rmap_pde);
                }
        }

        return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
        u64 *spte;
        int young = 0;

        /* always return old for EPT */
        if (!shadow_accessed_mask)
                return 0;

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                int _young;
                u64 _spte = *spte;
                BUG_ON(!(_spte & PT_PRESENT_MASK));
                _young = _spte & PT_ACCESSED_MASK;
                if (_young) {
                        young = 1;
                        clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
        return young;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
        u64 *pos;
        u64 *end;

        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
                if (is_shadow_present_pte(*pos)) {
                        printk(KERN_ERR "%s: %p %llx\n", __func__,
                               pos, *pos);
                        return 0;
                }
        return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        ASSERT(is_empty_shadow_page(sp->spt));
        list_del(&sp->link);
        __free_page(virt_to_page(sp->spt));
        __free_page(virt_to_page(sp->gfns));
        kfree(sp);
        ++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
{
        struct kvm_mmu_page *sp;

        sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
        sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        ASSERT(is_empty_shadow_page(sp->spt));
        sp->slot_bitmap = 0;
        sp->multimapped = 0;
        sp->parent_pte = parent_pte;
        --vcpu->kvm->arch.n_free_mmu_pages;
        return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp, u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!parent_pte)
                return;
        if (!sp->multimapped) {
                u64 *old = sp->parent_pte;

                if (!old) {
                        sp->parent_pte = parent_pte;
                        return;
                }
                sp->multimapped = 1;
                pte_chain = mmu_alloc_pte_chain(vcpu);
                INIT_HLIST_HEAD(&sp->parent_ptes);
                hlist_add_head(&pte_chain->link, &sp->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
                                return;
                        }
        }
        pte_chain = mmu_alloc_pte_chain(vcpu);
        BUG_ON(!pte_chain);
        hlist_add_head(&pte_chain->link, &sp->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
                                       u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!sp->multimapped) {
                BUG_ON(sp->parent_pte != parent_pte);
                sp->parent_pte = NULL;
                return;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        if (pte_chain->parent_ptes[i] != parent_pte)
                                continue;
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
                               && pte_chain->parent_ptes[i + 1]) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                                ++i;
                        }
                        pte_chain->parent_ptes[i] = NULL;
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
                                mmu_free_pte_chain(pte_chain);
                                if (hlist_empty(&sp->parent_ptes)) {
                                        sp->multimapped = 0;
                                        sp->parent_pte = NULL;
                                }
                        }
                        return;
                }
        BUG();
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                sp->spt[i] = shadow_trap_nonpresent_pte;
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
        struct hlist_node *node;

        pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry(sp, node, bucket, hash_link)
                if (sp->gfn == gfn && !sp->role.metaphysical
                    && !sp->role.invalid) {
                        pgprintk("%s: found role %x\n",
                                 __func__, sp->role.word);
                        return sp;
                }
        return NULL;
}

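/*
 * Note (added for clarity): sp->role, including the quadrant, is what
 * distinguishes multiple shadow pages for the same gfn. For example, a
 * 32-bit non-PAE guest page table has 1024 entries but a shadow page
 * holds only 512, so one guest table is shadowed by two shadow pages
 * selected by role.quadrant.
 */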
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
                                             unsigned access,
                                             u64 *parent_pte)
{
        union kvm_mmu_page_role role;
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
        struct hlist_node *node;

        role.word = 0;
        role.glevels = vcpu->arch.mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
        role.access = access;
        if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
        pgprintk("%s: looking gfn %lx role %x\n", __func__,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry(sp, node, bucket, hash_link)
                if (sp->gfn == gfn && sp->role.word == role.word) {
                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                        pgprintk("%s: found\n", __func__);
                        return sp;
                }
        ++vcpu->kvm->stat.mmu_cache_miss;
        sp = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!sp)
                return sp;
        pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link, bucket);
        if (!metaphysical)
                rmap_write_protect(vcpu->kvm, gfn);
        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
                vcpu->arch.mmu.prefetch_page(vcpu, sp);
        else
                nonpaging_prefetch_page(vcpu, sp);
        return sp;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                                         struct kvm_mmu_page *sp)
{
        unsigned i;
        u64 *pt;
        u64 ent;

        pt = sp->spt;

        if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        if (is_shadow_present_pte(pt[i]))
                                rmap_remove(kvm, &pt[i]);
                        pt[i] = shadow_trap_nonpresent_pte;
                }
                return;
        }

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];

                if (is_shadow_present_pte(ent)) {
                        if (!is_large_pte(ent)) {
                                ent &= PT64_BASE_ADDR_MASK;
                                mmu_page_remove_parent_pte(page_header(ent),
                                                           &pt[i]);
                        } else {
                                --kvm->stat.lpages;
                                rmap_remove(kvm, &pt[i]);
                        }
                }
                pt[i] = shadow_trap_nonpresent_pte;
        }
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
        mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
        int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                if (kvm->vcpus[i])
                        kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        u64 *parent_pte;

        while (sp->multimapped || sp->parent_pte) {
                if (!sp->multimapped)
                        parent_pte = sp->parent_pte;
                else {
                        struct kvm_pte_chain *chain;

                        chain = container_of(sp->parent_ptes.first,
                                             struct kvm_pte_chain, link);
                        parent_pte = chain->parent_ptes[0];
                }
                BUG_ON(!parent_pte);
                kvm_mmu_put_page(sp, parent_pte);
                set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
        }
}

static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        ++kvm->stat.mmu_shadow_zapped;
        kvm_mmu_page_unlink_children(kvm, sp);
        kvm_mmu_unlink_parents(kvm, sp);
        kvm_flush_remote_tlbs(kvm);
        if (!sp->role.invalid && !sp->role.metaphysical)
                unaccount_shadowed(kvm, sp->gfn);
        if (!sp->root_count) {
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
        } else {
                sp->role.invalid = 1;
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
                kvm_reload_remote_mmus(kvm);
        }
        kvm_mmu_reset_last_pte_updated(kvm);
}
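
/*
 * Note (added for clarity): a page that is still referenced as a root
 * (sp->root_count != 0) cannot be freed here; it is marked role.invalid
 * and stays on the active list until mmu_free_roots() drops the last
 * reference and zaps it again.
 */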

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
        /*
         * If we set the number of mmu pages to be smaller than the
         * number of active pages, we must free some mmu pages before we
         * can change the value.
         */

        if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
            kvm_nr_mmu_pages) {
                int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
                                       - kvm->arch.n_free_mmu_pages;

                while (n_used_mmu_pages > kvm_nr_mmu_pages) {
                        struct kvm_mmu_page *page;

                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
                        kvm_mmu_zap_page(kvm, page);
                        n_used_mmu_pages--;
                }
                kvm->arch.n_free_mmu_pages = 0;
        } else
                kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
                                              - kvm->arch.n_alloc_mmu_pages;

        kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
        struct hlist_node *node, *n;
        int r;

        pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
                if (sp->gfn == gfn && !sp->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
                                 sp->role.word);
                        kvm_mmu_zap_page(kvm, sp);
                        r = 1;
                }
        return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_mmu_page *sp;

        while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
                pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
                kvm_mmu_zap_page(kvm, sp);
        }
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
        struct kvm_mmu_page *sp = page_header(__pa(pte));

        __set_bit(slot, &sp->slot_bitmap);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
        struct page *page;

        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return NULL;

        down_read(&current->mm->mmap_sem);
        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
        up_read(&current->mm->mmap_sem);

        return page;
}

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
                         int *ptwrite, int largepage, gfn_t gfn,
                         pfn_t pfn, bool speculative)
{
        u64 spte;
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);

        pgprintk("%s: spte %llx access %x write_fault %d"
                 " user_fault %d gfn %lx\n",
                 __func__, *shadow_pte, pt_access,
                 write_fault, user_fault, gfn);

        if (is_rmap_pte(*shadow_pte)) {
                /*
                 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
                 * the parent of the now unreachable PTE.
                 */
                if (largepage && !is_large_pte(*shadow_pte)) {
                        struct kvm_mmu_page *child;
                        u64 pte = *shadow_pte;

                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, shadow_pte);
                } else if (pfn != spte_to_pfn(*shadow_pte)) {
                        pgprintk("hfn old %lx new %lx\n",
                                 spte_to_pfn(*shadow_pte), pfn);
                        rmap_remove(vcpu->kvm, shadow_pte);
                } else {
                        if (largepage)
                                was_rmapped = is_large_pte(*shadow_pte);
                        else
                                was_rmapped = 1;
                }
        }

        /*
         * We don't set the accessed bit, since we sometimes want to see
         * whether the guest actually used the pte (in order to detect
         * demand paging).
         */
        spte = shadow_base_present_pte | shadow_dirty_mask;
        if (!speculative)
                pte_access |= PT_ACCESSED_MASK;
        if (!dirty)
                pte_access &= ~ACC_WRITE_MASK;
        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
                spte |= shadow_nx_mask;
        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;
        if (largepage)
                spte |= PT_PAGE_SIZE_MASK;

        spte |= (u64)pfn << PAGE_SHIFT;

        if ((pte_access & ACC_WRITE_MASK)
            || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
                struct kvm_mmu_page *shadow;

                spte |= PT_WRITABLE_MASK;

                shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
                if (shadow ||
                    (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
                                 __func__, gfn);
                        pte_access &= ~ACC_WRITE_MASK;
                        if (is_writeble_pte(spte)) {
                                spte &= ~PT_WRITABLE_MASK;
                                kvm_x86_ops->tlb_flush(vcpu);
                        }
                        if (write_fault)
                                *ptwrite = 1;
                }
        }

        if (pte_access & ACC_WRITE_MASK)
                mark_page_dirty(vcpu->kvm, gfn);

        pgprintk("%s: setting spte %llx\n", __func__, spte);
        pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
                 (spte & PT_PAGE_SIZE_MASK) ? "2MB" : "4kB",
                 (spte & PT_WRITABLE_MASK) ? "RW" : "R", gfn, spte, shadow_pte);
        set_shadow_pte(shadow_pte, spte);
        if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
            && (spte & PT_PRESENT_MASK))
                ++vcpu->kvm->stat.lpages;

        page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
        if (!was_rmapped) {
                rmap_add(vcpu, shadow_pte, gfn, largepage);
                if (!is_rmap_pte(*shadow_pte))
                        kvm_release_pfn_clean(pfn);
        } else {
                if (was_writeble)
                        kvm_release_pfn_dirty(pfn);
                else
                        kvm_release_pfn_clean(pfn);
        }
        if (speculative) {
                vcpu->arch.last_pte_updated = shadow_pte;
                vcpu->arch.last_pte_gfn = gfn;
        }
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

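/*
 * Note (added for clarity): __direct_map() walks the shadow hierarchy
 * top-down from the root, allocating missing intermediate shadow pages
 * with kvm_mmu_get_page() as it descends, and installs the leaf spte via
 * mmu_set_spte() at level 1, or at level 2 for a large page mapping.
 */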
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                        int largepage, gfn_t gfn, pfn_t pfn,
                        int level)
{
        hpa_t table_addr = vcpu->arch.mmu.root_hpa;
        int pt_write = 0;

        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;

                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);

                if (level == 1) {
                        mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write, 0, gfn, pfn, false);
                        return pt_write;
                }

                if (largepage && level == 2) {
                        mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write, 1, gfn, pfn, false);
                        return pt_write;
                }

                if (table[index] == shadow_trap_nonpresent_pte) {
                        struct kvm_mmu_page *new_table;
                        gfn_t pseudo_gfn;

                        pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
                                                     1, ACC_ALL, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                kvm_release_pfn_clean(pfn);
                                return -ENOMEM;
                        }

                        set_shadow_pte(&table[index],
                                       __pa(new_table->spt)
                                       | PT_PRESENT_MASK | PT_WRITABLE_MASK
                                       | shadow_user_mask | shadow_x_mask);
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
}

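/*
 * Note (added for clarity): nonpaging_map() snapshots mmu_notifier_seq
 * before translating gfn to pfn; mmu_notifier_retry() then checks, under
 * mmu_lock, whether an invalidation ran in between. If it did, the pfn
 * may be stale, so it is released and the fault is simply retried.
 */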
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001277static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1278{
1279 int r;
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001280 int largepage = 0;
Anthony Liguori35149e22008-04-02 14:46:56 -05001281 pfn_t pfn;
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001282 unsigned long mmu_seq;
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001283
1284 down_read(&current->mm->mmap_sem);
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001285 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1286 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1287 largepage = 1;
1288 }
1289
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001290 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1291 /* implicit mb(), we'll read before PT lock is unlocked */
Anthony Liguori35149e22008-04-02 14:46:56 -05001292 pfn = gfn_to_pfn(vcpu->kvm, gfn);
Izik Eidus72dc67a2008-02-10 18:04:15 +02001293 up_read(&current->mm->mmap_sem);
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001294
Avi Kivityd196e342008-01-24 11:44:11 +02001295 /* mmio */
Anthony Liguori35149e22008-04-02 14:46:56 -05001296 if (is_error_pfn(pfn)) {
1297 kvm_release_pfn_clean(pfn);
Avi Kivityd196e342008-01-24 11:44:11 +02001298 return 1;
1299 }
1300
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001301 spin_lock(&vcpu->kvm->mmu_lock);
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001302 if (mmu_notifier_retry(vcpu, mmu_seq))
1303 goto out_unlock;
Avi Kivityeb787d12007-12-31 15:27:49 +02001304 kvm_mmu_free_some_pages(vcpu);
Anthony Liguori35149e22008-04-02 14:46:56 -05001305 r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001306 PT32E_ROOT_LEVEL);
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001307 spin_unlock(&vcpu->kvm->mmu_lock);
1308
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001310 return r;
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001311
1312out_unlock:
1313 spin_unlock(&vcpu->kvm->mmu_lock);
1314 kvm_release_pfn_clean(pfn);
1315 return 0;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001316}
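
/*
 * The mmu_seq dance above guards against a real race; a hypothetical
 * interleaving, as a sketch:
 *
 *	vcpu thread				mmu notifier
 *	mmu_seq = kvm->mmu_notifier_seq
 *	pfn = gfn_to_pfn()
 *						invalidate: seq is bumped,
 *						pfn freed and reused
 *	spin_lock(mmu_lock)
 *	mmu_notifier_retry() -> true
 *	-> release the pfn and bail out rather than map a stale page
 */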
1317
Avi Kivity17ac10a2007-01-05 16:36:40 -08001319static void mmu_free_roots(struct kvm_vcpu *vcpu)
1320{
1321 int i;
Avi Kivity4db35312007-11-21 15:28:32 +02001322 struct kvm_mmu_page *sp;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001323
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001324 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
Avi Kivity7b53aa52007-06-05 12:17:03 +03001325 return;
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001326 spin_lock(&vcpu->kvm->mmu_lock);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001327 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1328 hpa_t root = vcpu->arch.mmu.root_hpa;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001329
Avi Kivity4db35312007-11-21 15:28:32 +02001330 sp = page_header(root);
1331 --sp->root_count;
Marcelo Tosatti2e53d632008-02-20 14:47:24 -05001332 if (!sp->root_count && sp->role.invalid)
1333 kvm_mmu_zap_page(vcpu->kvm, sp);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001334 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001335 spin_unlock(&vcpu->kvm->mmu_lock);
Avi Kivity17ac10a2007-01-05 16:36:40 -08001336 return;
1337 }
Avi Kivity17ac10a2007-01-05 16:36:40 -08001338 for (i = 0; i < 4; ++i) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001339 hpa_t root = vcpu->arch.mmu.pae_root[i];
Avi Kivity17ac10a2007-01-05 16:36:40 -08001340
Avi Kivity417726a2007-04-12 17:35:58 +03001341 if (root) {
Avi Kivity417726a2007-04-12 17:35:58 +03001342 root &= PT64_BASE_ADDR_MASK;
Avi Kivity4db35312007-11-21 15:28:32 +02001343 sp = page_header(root);
1344 --sp->root_count;
Marcelo Tosatti2e53d632008-02-20 14:47:24 -05001345 if (!sp->root_count && sp->role.invalid)
1346 kvm_mmu_zap_page(vcpu->kvm, sp);
Avi Kivity417726a2007-04-12 17:35:58 +03001347 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001348 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001349 }
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001350 spin_unlock(&vcpu->kvm->mmu_lock);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001351 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001352}
1353
1354static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1355{
1356 int i;
Avi Kivitycea0f0e2007-01-05 16:36:43 -08001357 gfn_t root_gfn;
Avi Kivity4db35312007-11-21 15:28:32 +02001358 struct kvm_mmu_page *sp;
Joerg Roedelfb72d162008-02-07 13:47:44 +01001359 int metaphysical = 0;
Avi Kivity3bb65a22007-01-05 16:36:51 -08001360
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001361 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001362
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001363 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1364 hpa_t root = vcpu->arch.mmu.root_hpa;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001365
1366 ASSERT(!VALID_PAGE(root));
Joerg Roedelfb72d162008-02-07 13:47:44 +01001367 if (tdp_enabled)
1368 metaphysical = 1;
Avi Kivity4db35312007-11-21 15:28:32 +02001369 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
Joerg Roedelfb72d162008-02-07 13:47:44 +01001370 PT64_ROOT_LEVEL, metaphysical,
1371 ACC_ALL, NULL);
Avi Kivity4db35312007-11-21 15:28:32 +02001372 root = __pa(sp->spt);
1373 ++sp->root_count;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001374 vcpu->arch.mmu.root_hpa = root;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001375 return;
1376 }
Joerg Roedelfb72d162008-02-07 13:47:44 +01001377 metaphysical = !is_paging(vcpu);
1378 if (tdp_enabled)
1379 metaphysical = 1;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001380 for (i = 0; i < 4; ++i) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001381 hpa_t root = vcpu->arch.mmu.pae_root[i];
Avi Kivity17ac10a2007-01-05 16:36:40 -08001382
1383 ASSERT(!VALID_PAGE(root));
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001384 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1385 if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1386 vcpu->arch.mmu.pae_root[i] = 0;
Avi Kivity417726a2007-04-12 17:35:58 +03001387 continue;
1388 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001389 root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1390 } else if (vcpu->arch.mmu.root_level == 0)
Avi Kivitycea0f0e2007-01-05 16:36:43 -08001391 root_gfn = 0;
Avi Kivity4db35312007-11-21 15:28:32 +02001392 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
Joerg Roedelfb72d162008-02-07 13:47:44 +01001393 PT32_ROOT_LEVEL, metaphysical,
Avi Kivityf7d9c7b2008-02-26 22:12:10 +02001394 ACC_ALL, NULL);
Avi Kivity4db35312007-11-21 15:28:32 +02001395 root = __pa(sp->spt);
1396 ++sp->root_count;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001397 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001398 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001399 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
Avi Kivity17ac10a2007-01-05 16:36:40 -08001400}
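
/*
 * Note on the PAE loop above: pae_root[i] shadows the 1GB slice of
 * guest virtual space starting at (i << 30), which is why the shadow
 * page is looked up with address i << 30; e.g. pae_root[2] covers
 * 0x80000000 - 0xbfffffff.
 */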
1401
Avi Kivity6aa8b732006-12-10 02:21:36 -08001402static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1403{
1404 return vaddr;
1405}
1406
1407static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
Avi Kivity3f3e7122007-11-21 14:54:16 +02001408 u32 error_code)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001409{
Avi Kivitye8332402007-12-09 18:43:00 +02001410 gfn_t gfn;
Avi Kivitye2dec932007-01-05 16:36:54 -08001411 int r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001412
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001413 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
Avi Kivitye2dec932007-01-05 16:36:54 -08001414 r = mmu_topup_memory_caches(vcpu);
1415 if (r)
1416 return r;
Avi Kivity714b93d2007-01-05 16:36:53 -08001417
Avi Kivity6aa8b732006-12-10 02:21:36 -08001418 ASSERT(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001419 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001420
Avi Kivitye8332402007-12-09 18:43:00 +02001421 gfn = gva >> PAGE_SHIFT;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001422
Avi Kivitye8332402007-12-09 18:43:00 +02001423 return nonpaging_map(vcpu, gva & PAGE_MASK,
1424 error_code & PFERR_WRITE_MASK, gfn);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001425}
1426
Joerg Roedelfb72d162008-02-07 13:47:44 +01001427static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1428 u32 error_code)
1429{
Anthony Liguori35149e22008-04-02 14:46:56 -05001430 pfn_t pfn;
Joerg Roedelfb72d162008-02-07 13:47:44 +01001431 int r;
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001432 int largepage = 0;
1433 gfn_t gfn = gpa >> PAGE_SHIFT;
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001434 unsigned long mmu_seq;
Joerg Roedelfb72d162008-02-07 13:47:44 +01001435
1436 ASSERT(vcpu);
1437 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1438
1439 r = mmu_topup_memory_caches(vcpu);
1440 if (r)
1441 return r;
1442
1443 down_read(&current->mm->mmap_sem);
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001444 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1445 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1446 largepage = 1;
1447 }
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001448 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1449 /* implicit mb(): read mmu_notifier_seq before the pfn lookup, recheck under mmu_lock */
Anthony Liguori35149e22008-04-02 14:46:56 -05001450 pfn = gfn_to_pfn(vcpu->kvm, gfn);
Marcelo Tosatti3200f402008-03-29 20:17:59 -03001451 up_read(&current->mm->mmap_sem);
Anthony Liguori35149e22008-04-02 14:46:56 -05001452 if (is_error_pfn(pfn)) {
1453 kvm_release_pfn_clean(pfn);
Joerg Roedelfb72d162008-02-07 13:47:44 +01001454 return 1;
1455 }
1456 spin_lock(&vcpu->kvm->mmu_lock);
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001457 if (mmu_notifier_retry(vcpu, mmu_seq))
1458 goto out_unlock;
Joerg Roedelfb72d162008-02-07 13:47:44 +01001459 kvm_mmu_free_some_pages(vcpu);
1460 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
Sheng Yang67253af2008-04-25 10:20:22 +08001461 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
Joerg Roedelfb72d162008-02-07 13:47:44 +01001462 spin_unlock(&vcpu->kvm->mmu_lock);
Joerg Roedelfb72d162008-02-07 13:47:44 +01001463
1464 return r;
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001465
1466out_unlock:
1467 spin_unlock(&vcpu->kvm->mmu_lock);
1468 kvm_release_pfn_clean(pfn);
1469 return 0;
Joerg Roedelfb72d162008-02-07 13:47:44 +01001470}
1471
Avi Kivity6aa8b732006-12-10 02:21:36 -08001472static void nonpaging_free(struct kvm_vcpu *vcpu)
1473{
Avi Kivity17ac10a2007-01-05 16:36:40 -08001474 mmu_free_roots(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001475}
1476
1477static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1478{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001479 struct kvm_mmu *context = &vcpu->arch.mmu;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001480
1481 context->new_cr3 = nonpaging_new_cr3;
1482 context->page_fault = nonpaging_page_fault;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001483 context->gva_to_gpa = nonpaging_gva_to_gpa;
1484 context->free = nonpaging_free;
Avi Kivityc7addb92007-09-16 18:58:32 +02001485 context->prefetch_page = nonpaging_prefetch_page;
Avi Kivitycea0f0e2007-01-05 16:36:43 -08001486 context->root_level = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001487 context->shadow_root_level = PT32E_ROOT_LEVEL;
Avi Kivity17c3ba92007-06-04 15:58:30 +03001488 context->root_hpa = INVALID_PAGE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001489 return 0;
1490}
1491
Avi Kivityd835dfe2007-11-21 02:57:59 +02001492void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001493{
Avi Kivity1165f5f2007-04-19 17:27:43 +03001494 ++vcpu->stat.tlb_flush;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001495 kvm_x86_ops->tlb_flush(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001496}
1497
1498static void paging_new_cr3(struct kvm_vcpu *vcpu)
1499{
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001500 pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
Avi Kivitycea0f0e2007-01-05 16:36:43 -08001501 mmu_free_roots(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001502}
1503
Avi Kivity6aa8b732006-12-10 02:21:36 -08001504static void inject_page_fault(struct kvm_vcpu *vcpu,
1505 u64 addr,
1506 u32 err_code)
1507{
Avi Kivityc3c91fe2007-11-25 14:04:58 +02001508 kvm_inject_page_fault(vcpu, addr, err_code);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001509}
1510
Avi Kivity6aa8b732006-12-10 02:21:36 -08001511static void paging_free(struct kvm_vcpu *vcpu)
1512{
1513 nonpaging_free(vcpu);
1514}
1515
1516#define PTTYPE 64
1517#include "paging_tmpl.h"
1518#undef PTTYPE
1519
1520#define PTTYPE 32
1521#include "paging_tmpl.h"
1522#undef PTTYPE
1523
Avi Kivity17ac10a2007-01-05 16:36:40 -08001524static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001525{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001526 struct kvm_mmu *context = &vcpu->arch.mmu;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001527
1528 ASSERT(is_pae(vcpu));
1529 context->new_cr3 = paging_new_cr3;
1530 context->page_fault = paging64_page_fault;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001531 context->gva_to_gpa = paging64_gva_to_gpa;
Avi Kivityc7addb92007-09-16 18:58:32 +02001532 context->prefetch_page = paging64_prefetch_page;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001533 context->free = paging_free;
Avi Kivity17ac10a2007-01-05 16:36:40 -08001534 context->root_level = level;
1535 context->shadow_root_level = level;
Avi Kivity17c3ba92007-06-04 15:58:30 +03001536 context->root_hpa = INVALID_PAGE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001537 return 0;
1538}
1539
Avi Kivity17ac10a2007-01-05 16:36:40 -08001540static int paging64_init_context(struct kvm_vcpu *vcpu)
1541{
1542 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1543}
1544
Avi Kivity6aa8b732006-12-10 02:21:36 -08001545static int paging32_init_context(struct kvm_vcpu *vcpu)
1546{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001547 struct kvm_mmu *context = &vcpu->arch.mmu;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001548
1549 context->new_cr3 = paging_new_cr3;
1550 context->page_fault = paging32_page_fault;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001551 context->gva_to_gpa = paging32_gva_to_gpa;
1552 context->free = paging_free;
Avi Kivityc7addb92007-09-16 18:58:32 +02001553 context->prefetch_page = paging32_prefetch_page;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001554 context->root_level = PT32_ROOT_LEVEL;
1555 context->shadow_root_level = PT32E_ROOT_LEVEL;
Avi Kivity17c3ba92007-06-04 15:58:30 +03001556 context->root_hpa = INVALID_PAGE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001557 return 0;
1558}
1559
1560static int paging32E_init_context(struct kvm_vcpu *vcpu)
1561{
Avi Kivity17ac10a2007-01-05 16:36:40 -08001562 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001563}
1564
Joerg Roedelfb72d162008-02-07 13:47:44 +01001565static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1566{
1567 struct kvm_mmu *context = &vcpu->arch.mmu;
1568
1569 context->new_cr3 = nonpaging_new_cr3;
1570 context->page_fault = tdp_page_fault;
1571 context->free = nonpaging_free;
1572 context->prefetch_page = nonpaging_prefetch_page;
Sheng Yang67253af2008-04-25 10:20:22 +08001573 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
Joerg Roedelfb72d162008-02-07 13:47:44 +01001574 context->root_hpa = INVALID_PAGE;
1575
1576 if (!is_paging(vcpu)) {
1577 context->gva_to_gpa = nonpaging_gva_to_gpa;
1578 context->root_level = 0;
1579 } else if (is_long_mode(vcpu)) {
1580 context->gva_to_gpa = paging64_gva_to_gpa;
1581 context->root_level = PT64_ROOT_LEVEL;
1582 } else if (is_pae(vcpu)) {
1583 context->gva_to_gpa = paging64_gva_to_gpa;
1584 context->root_level = PT32E_ROOT_LEVEL;
1585 } else {
1586 context->gva_to_gpa = paging32_gva_to_gpa;
1587 context->root_level = PT32_ROOT_LEVEL;
1588 }
1589
1590 return 0;
1591}
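
/*
 * With TDP the hardware walks the guest page tables itself, so the
 * fault handler is always tdp_page_fault; only the software walker
 * used for gva_to_gpa must track the guest mode, hence the dispatch
 * above:
 *
 *	guest mode	gva_to_gpa		root_level
 *	unpaged		nonpaging_gva_to_gpa	0
 *	long mode	paging64_gva_to_gpa	PT64_ROOT_LEVEL
 *	PAE		paging64_gva_to_gpa	PT32E_ROOT_LEVEL
 *	32-bit		paging32_gva_to_gpa	PT32_ROOT_LEVEL
 */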
1592
1593static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001594{
1595 ASSERT(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001596 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001597
1598 if (!is_paging(vcpu))
1599 return nonpaging_init_context(vcpu);
Avi Kivitya9058ec2006-12-29 16:49:37 -08001600 else if (is_long_mode(vcpu))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001601 return paging64_init_context(vcpu);
1602 else if (is_pae(vcpu))
1603 return paging32E_init_context(vcpu);
1604 else
1605 return paging32_init_context(vcpu);
1606}
1607
Joerg Roedelfb72d162008-02-07 13:47:44 +01001608static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1609{
Anthony Liguori35149e22008-04-02 14:46:56 -05001610 vcpu->arch.update_pte.pfn = bad_pfn;
1611
Joerg Roedelfb72d162008-02-07 13:47:44 +01001612 if (tdp_enabled)
1613 return init_kvm_tdp_mmu(vcpu);
1614 else
1615 return init_kvm_softmmu(vcpu);
1616}
1617
Avi Kivity6aa8b732006-12-10 02:21:36 -08001618static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1619{
1620 ASSERT(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001621 if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1622 vcpu->arch.mmu.free(vcpu);
1623 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001624 }
1625}
1626
1627int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1628{
Avi Kivity17c3ba92007-06-04 15:58:30 +03001629 destroy_kvm_mmu(vcpu);
1630 return init_kvm_mmu(vcpu);
1631}
Eddie Dong8668a3c2007-10-10 14:26:45 +08001632EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
Avi Kivity17c3ba92007-06-04 15:58:30 +03001633
1634int kvm_mmu_load(struct kvm_vcpu *vcpu)
1635{
Avi Kivity714b93d2007-01-05 16:36:53 -08001636 int r;
1637
Avi Kivitye2dec932007-01-05 16:36:54 -08001638 r = mmu_topup_memory_caches(vcpu);
Avi Kivity17c3ba92007-06-04 15:58:30 +03001639 if (r)
1640 goto out;
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001641 spin_lock(&vcpu->kvm->mmu_lock);
Avi Kivityeb787d12007-12-31 15:27:49 +02001642 kvm_mmu_free_some_pages(vcpu);
Avi Kivity17c3ba92007-06-04 15:58:30 +03001643 mmu_alloc_roots(vcpu);
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001644 spin_unlock(&vcpu->kvm->mmu_lock);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001645 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
Avi Kivity17c3ba92007-06-04 15:58:30 +03001646 kvm_mmu_flush_tlb(vcpu);
Avi Kivity714b93d2007-01-05 16:36:53 -08001647out:
1648 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001649}
Avi Kivity17c3ba92007-06-04 15:58:30 +03001650EXPORT_SYMBOL_GPL(kvm_mmu_load);
1651
1652void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1653{
1654 mmu_free_roots(vcpu);
1655}
Avi Kivity6aa8b732006-12-10 02:21:36 -08001656
Avi Kivity09072da2007-05-01 14:16:52 +03001657static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
Avi Kivity4db35312007-11-21 15:28:32 +02001658 struct kvm_mmu_page *sp,
Avi Kivityac1b7142007-03-08 17:13:32 +02001659 u64 *spte)
1660{
1661 u64 pte;
1662 struct kvm_mmu_page *child;
1663
1664 pte = *spte;
Avi Kivityc7addb92007-09-16 18:58:32 +02001665 if (is_shadow_present_pte(pte)) {
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001666 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1667 is_large_pte(pte))
Izik Eidus290fc382007-09-27 14:11:22 +02001668 rmap_remove(vcpu->kvm, spte);
Avi Kivityac1b7142007-03-08 17:13:32 +02001669 else {
1670 child = page_header(pte & PT64_BASE_ADDR_MASK);
Avi Kivity90cb0522007-07-17 13:04:56 +03001671 mmu_page_remove_parent_pte(child, spte);
Avi Kivityac1b7142007-03-08 17:13:32 +02001672 }
1673 }
Avi Kivityc7addb92007-09-16 18:58:32 +02001674 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001675 if (is_large_pte(pte))
1676 --vcpu->kvm->stat.lpages;
Avi Kivityac1b7142007-03-08 17:13:32 +02001677}
1678
Avi Kivity00284252007-05-01 16:53:31 +03001679static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
Avi Kivity4db35312007-11-21 15:28:32 +02001680 struct kvm_mmu_page *sp,
Avi Kivity00284252007-05-01 16:53:31 +03001681 u64 *spte,
Dong, Eddie489f1d62008-01-07 11:14:20 +02001682 const void *new)
Avi Kivity00284252007-05-01 16:53:31 +03001683{
Marcelo Tosatti30945382008-06-11 20:32:40 -03001684 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
1685 if (!vcpu->arch.update_pte.largepage ||
1686 sp->role.glevels == PT32_ROOT_LEVEL) {
1687 ++vcpu->kvm->stat.mmu_pde_zapped;
1688 return;
1689 }
1690 }
Avi Kivity00284252007-05-01 16:53:31 +03001691
Avi Kivity4cee5762007-11-18 16:37:07 +02001692 ++vcpu->kvm->stat.mmu_pte_updated;
Avi Kivity4db35312007-11-21 15:28:32 +02001693 if (sp->role.glevels == PT32_ROOT_LEVEL)
Dong, Eddie489f1d62008-01-07 11:14:20 +02001694 paging32_update_pte(vcpu, sp, spte, new);
Avi Kivity00284252007-05-01 16:53:31 +03001695 else
Dong, Eddie489f1d62008-01-07 11:14:20 +02001696 paging64_update_pte(vcpu, sp, spte, new);
Avi Kivity00284252007-05-01 16:53:31 +03001697}
1698
Avi Kivity79539ce2007-11-21 02:06:21 +02001699static bool need_remote_flush(u64 old, u64 new)
1700{
1701 if (!is_shadow_present_pte(old))
1702 return false;
1703 if (!is_shadow_present_pte(new))
1704 return true;
1705 if ((old ^ new) & PT64_BASE_ADDR_MASK)
1706 return true;
1707 old ^= PT64_NX_MASK;
1708 new ^= PT64_NX_MASK;
1709 return (old & ~new & PT64_PERM_MASK) != 0;
1710}
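
/*
 * Worked example for the NX flip above: NX is the one bit in
 * PT64_PERM_MASK where 1 means *less* access, so both ptes are XORed
 * with PT64_NX_MASK to make every bit read as "1 grants access".
 * Revoking execute (old NX=0, new NX=1) then shows up as old'=1,
 * new'=0, making old' & ~new' non-zero: remote TLBs may still hold
 * the executable mapping and must be flushed. Granting execute
 * (NX 1 -> 0) yields 0 & ~1 = 0 for that bit, so a local flush
 * suffices.
 */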
1711
1712static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1713{
1714 if (need_remote_flush(old, new))
1715 kvm_flush_remote_tlbs(vcpu->kvm);
1716 else
1717 kvm_mmu_flush_tlb(vcpu);
1718}
1719
Avi Kivity12b7d282007-09-23 14:10:49 +02001720static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1721{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001722 u64 *spte = vcpu->arch.last_pte_updated;
Avi Kivity12b7d282007-09-23 14:10:49 +02001723
Sheng Yang7b523452008-04-25 21:13:50 +08001724 return !!(spte && (*spte & shadow_accessed_mask));
Avi Kivity12b7d282007-09-23 14:10:49 +02001725}
1726
Avi Kivityd7824ff2007-12-30 12:29:05 +02001727static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1728 const u8 *new, int bytes)
1729{
1730 gfn_t gfn;
1731 int r;
1732 u64 gpte = 0;
Anthony Liguori35149e22008-04-02 14:46:56 -05001733 pfn_t pfn;
Avi Kivityd7824ff2007-12-30 12:29:05 +02001734
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001735 vcpu->arch.update_pte.largepage = 0;
1736
Avi Kivityd7824ff2007-12-30 12:29:05 +02001737 if (bytes != 4 && bytes != 8)
1738 return;
1739
1740 /*
1741 * Assume that the pte write is on a page table of the same type
1742 * as the current vcpu paging mode. This is nearly always true
1743 * (might be false while changing modes). Note it is verified later
1744 * by update_pte().
1745 */
1746 if (is_pae(vcpu)) {
1747 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
1748 if ((bytes == 4) && (gpa % 4 == 0)) {
1749 r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
1750 if (r)
1751 return;
1752 memcpy((void *)&gpte + (gpa % 8), new, 4);
1753 } else if ((bytes == 8) && (gpa % 8 == 0)) {
1754 memcpy((void *)&gpte, new, 8);
1755 }
1756 } else {
1757 if ((bytes == 4) && (gpa % 4 == 0))
1758 memcpy((void *)&gpte, new, 4);
1759 }
1760 if (!is_present_pte(gpte))
1761 return;
1762 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
Izik Eidus72dc67a2008-02-10 18:04:15 +02001763
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001764 down_read(&current->mm->mmap_sem);
1765 if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1766 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1767 vcpu->arch.update_pte.largepage = 1;
1768 }
Andrea Arcangelie930bff2008-07-25 16:24:52 +02001769 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
1770 /* implicit mb(): read mmu_notifier_seq before the pfn lookup, recheck under mmu_lock */
Anthony Liguori35149e22008-04-02 14:46:56 -05001771 pfn = gfn_to_pfn(vcpu->kvm, gfn);
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001772 up_read(&current->mm->mmap_sem);
Izik Eidus72dc67a2008-02-10 18:04:15 +02001773
Anthony Liguori35149e22008-04-02 14:46:56 -05001774 if (is_error_pfn(pfn)) {
1775 kvm_release_pfn_clean(pfn);
Avi Kivityd196e342008-01-24 11:44:11 +02001776 return;
1777 }
Avi Kivityd7824ff2007-12-30 12:29:05 +02001778 vcpu->arch.update_pte.gfn = gfn;
Anthony Liguori35149e22008-04-02 14:46:56 -05001779 vcpu->arch.update_pte.pfn = pfn;
Avi Kivityd7824ff2007-12-30 12:29:05 +02001780}
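
/*
 * Example of the reassembly above (hypothetical numbers): a PAE guest
 * updating the high half of a 64-bit gpte writes 4 bytes at, say,
 * gpa 0x1004. The code reads the whole aligned gpte at 0x1000 with
 * kvm_read_guest() and memcpy()s the new bytes in at offset
 * gpa % 8 == 4, reconstructing the full gpte before deciding which
 * pfn to prefetch.
 */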
1781
Avi Kivity1b7fcd32008-05-15 13:51:35 +03001782static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
1783{
1784 u64 *spte = vcpu->arch.last_pte_updated;
1785
1786 if (spte
1787 && vcpu->arch.last_pte_gfn == gfn
1788 && shadow_accessed_mask
1789 && !(*spte & shadow_accessed_mask)
1790 && is_shadow_present_pte(*spte))
1791 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
1792}
1793
Avi Kivity09072da2007-05-01 14:16:52 +03001794void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
Shaohua Life5518812007-07-23 14:51:39 +08001795 const u8 *new, int bytes)
Avi Kivityda4a00f2007-01-05 16:36:44 -08001796{
Avi Kivity9b7a0322007-01-05 16:36:45 -08001797 gfn_t gfn = gpa >> PAGE_SHIFT;
Avi Kivity4db35312007-11-21 15:28:32 +02001798 struct kvm_mmu_page *sp;
Avi Kivity0e7bc4b2007-01-05 16:36:48 -08001799 struct hlist_node *node, *n;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001800 struct hlist_head *bucket;
1801 unsigned index;
Dong, Eddie489f1d62008-01-07 11:14:20 +02001802 u64 entry, gentry;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001803 u64 *spte;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001804 unsigned offset = offset_in_page(gpa);
Avi Kivity0e7bc4b2007-01-05 16:36:48 -08001805 unsigned pte_size;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001806 unsigned page_offset;
Avi Kivity0e7bc4b2007-01-05 16:36:48 -08001807 unsigned misaligned;
Avi Kivityfce06572007-05-01 16:44:05 +03001808 unsigned quadrant;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001809 int level;
Avi Kivity86a5ba02007-01-05 16:36:50 -08001810 int flooded = 0;
Avi Kivityac1b7142007-03-08 17:13:32 +02001811 int npte;
Dong, Eddie489f1d62008-01-07 11:14:20 +02001812 int r;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001813
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001814 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
Avi Kivityd7824ff2007-12-30 12:29:05 +02001815 mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001816 spin_lock(&vcpu->kvm->mmu_lock);
Avi Kivity1b7fcd32008-05-15 13:51:35 +03001817 kvm_mmu_access_page(vcpu, gfn);
Avi Kivityeb787d12007-12-31 15:27:49 +02001818 kvm_mmu_free_some_pages(vcpu);
Avi Kivity4cee5762007-11-18 16:37:07 +02001819 ++vcpu->kvm->stat.mmu_pte_write;
Avi Kivityc7addb92007-09-16 18:58:32 +02001820 kvm_mmu_audit(vcpu, "pre pte write");
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001821 if (gfn == vcpu->arch.last_pt_write_gfn
Avi Kivity12b7d282007-09-23 14:10:49 +02001822 && !last_updated_pte_accessed(vcpu)) {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001823 ++vcpu->arch.last_pt_write_count;
1824 if (vcpu->arch.last_pt_write_count >= 3)
Avi Kivity86a5ba02007-01-05 16:36:50 -08001825 flooded = 1;
1826 } else {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001827 vcpu->arch.last_pt_write_gfn = gfn;
1828 vcpu->arch.last_pt_write_count = 1;
1829 vcpu->arch.last_pte_updated = NULL;
Avi Kivity86a5ba02007-01-05 16:36:50 -08001830 }
Dong, Eddie1ae0a132008-01-07 13:20:25 +02001831 index = kvm_page_table_hashfn(gfn);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08001832 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
Avi Kivity4db35312007-11-21 15:28:32 +02001833 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
Avi Kivity5b5c6a52008-07-11 18:07:26 +03001834 if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
Avi Kivity9b7a0322007-01-05 16:36:45 -08001835 continue;
Avi Kivity4db35312007-11-21 15:28:32 +02001836 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
Avi Kivity0e7bc4b2007-01-05 16:36:48 -08001837 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
Avi Kivitye925c5b2007-04-30 14:47:02 +03001838 misaligned |= bytes < 4;
Avi Kivity86a5ba02007-01-05 16:36:50 -08001839 if (misaligned || flooded) {
Avi Kivity0e7bc4b2007-01-05 16:36:48 -08001840 /*
1841 * Misaligned accesses are too much trouble to fix
1842 * up; also, they usually indicate a page is not used
1843 * as a page table.
Avi Kivity86a5ba02007-01-05 16:36:50 -08001844 *
1845 * If we're seeing too many writes to a page,
1846 * it may no longer be a page table, or we may be
1847 * forking, in which case it is better to unmap the
1848 * page.
Avi Kivity0e7bc4b2007-01-05 16:36:48 -08001849 */
1850 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
Avi Kivity4db35312007-11-21 15:28:32 +02001851 gpa, bytes, sp->role.word);
1852 kvm_mmu_zap_page(vcpu->kvm, sp);
Avi Kivity4cee5762007-11-18 16:37:07 +02001853 ++vcpu->kvm->stat.mmu_flooded;
Avi Kivity0e7bc4b2007-01-05 16:36:48 -08001854 continue;
1855 }
Avi Kivity9b7a0322007-01-05 16:36:45 -08001856 page_offset = offset;
Avi Kivity4db35312007-11-21 15:28:32 +02001857 level = sp->role.level;
Avi Kivityac1b7142007-03-08 17:13:32 +02001858 npte = 1;
Avi Kivity4db35312007-11-21 15:28:32 +02001859 if (sp->role.glevels == PT32_ROOT_LEVEL) {
Avi Kivityac1b7142007-03-08 17:13:32 +02001860 page_offset <<= 1; /* 32->64 */
1861 /*
1862 * A 32-bit pde maps 4MB while the shadow pdes map
1863 * only 2MB. So we need to double the offset again
1864 * and zap two pdes instead of one.
1865 */
1866 if (level == PT32_ROOT_LEVEL) {
Avi Kivity6b8d0f92007-04-18 11:18:18 +03001867 page_offset &= ~7; /* kill rounding error */
Avi Kivityac1b7142007-03-08 17:13:32 +02001868 page_offset <<= 1;
1869 npte = 2;
1870 }
Avi Kivityfce06572007-05-01 16:44:05 +03001871 quadrant = page_offset >> PAGE_SHIFT;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001872 page_offset &= ~PAGE_MASK;
Avi Kivity4db35312007-11-21 15:28:32 +02001873 if (quadrant != sp->role.quadrant)
Avi Kivityfce06572007-05-01 16:44:05 +03001874 continue;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001875 }
Avi Kivity4db35312007-11-21 15:28:32 +02001876 spte = &sp->spt[page_offset / sizeof(*spte)];
Dong, Eddie489f1d62008-01-07 11:14:20 +02001877 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1878 gentry = 0;
1879 r = kvm_read_guest_atomic(vcpu->kvm,
1880 gpa & ~(u64)(pte_size - 1),
1881 &gentry, pte_size);
1882 new = (const void *)&gentry;
1883 if (r < 0)
1884 new = NULL;
1885 }
Avi Kivityac1b7142007-03-08 17:13:32 +02001886 while (npte--) {
Avi Kivity79539ce2007-11-21 02:06:21 +02001887 entry = *spte;
Avi Kivity4db35312007-11-21 15:28:32 +02001888 mmu_pte_write_zap_pte(vcpu, sp, spte);
Dong, Eddie489f1d62008-01-07 11:14:20 +02001889 if (new)
1890 mmu_pte_write_new_pte(vcpu, sp, spte, new);
Avi Kivity79539ce2007-11-21 02:06:21 +02001891 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
Avi Kivityac1b7142007-03-08 17:13:32 +02001892 ++spte;
Avi Kivity9b7a0322007-01-05 16:36:45 -08001893 }
Avi Kivity9b7a0322007-01-05 16:36:45 -08001894 }
Avi Kivityc7addb92007-09-16 18:58:32 +02001895 kvm_mmu_audit(vcpu, "post pte write");
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001896 spin_unlock(&vcpu->kvm->mmu_lock);
Anthony Liguori35149e22008-04-02 14:46:56 -05001897 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
1898 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
1899 vcpu->arch.update_pte.pfn = bad_pfn;
Avi Kivityd7824ff2007-12-30 12:29:05 +02001900 }
Avi Kivityda4a00f2007-01-05 16:36:44 -08001901}
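
/*
 * Worked example of the quadrant arithmetic above: a 32-bit guest
 * writes pde 256 of its page directory, i.e. offset 0x400.
 * page_offset doubles to 0x800 (4-byte guest entries, 8-byte sptes);
 * since a guest pde maps 4MB but a shadow pde only 2MB, it doubles
 * again to 0x1000 with npte = 2. quadrant = 0x1000 >> PAGE_SHIFT = 1
 * selects the shadow page covering gva 1GB-2GB, and after masking
 * with ~PAGE_MASK the two sptes starting at spt[0] are zapped and
 * updated.
 */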
1902
Avi Kivitya4360362007-01-05 16:36:45 -08001903int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1904{
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001905 gpa_t gpa;
1906 int r;
Avi Kivitya4360362007-01-05 16:36:45 -08001907
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001908 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001909
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001910 spin_lock(&vcpu->kvm->mmu_lock);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001911 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05001912 spin_unlock(&vcpu->kvm->mmu_lock);
Marcelo Tosatti10589a42007-12-20 19:18:22 -05001913 return r;
Avi Kivitya4360362007-01-05 16:36:45 -08001914}
Avi Kivity577bdc42008-07-19 08:57:05 +03001915EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
Avi Kivitya4360362007-01-05 16:36:45 -08001916
Avi Kivity22d95b12007-09-14 20:26:06 +03001917void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
Avi Kivityebeace82007-01-05 16:36:47 -08001918{
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08001919 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
Avi Kivity4db35312007-11-21 15:28:32 +02001920 struct kvm_mmu_page *sp;
Avi Kivityebeace82007-01-05 16:36:47 -08001921
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08001922 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
Avi Kivity4db35312007-11-21 15:28:32 +02001923 struct kvm_mmu_page, link);
1924 kvm_mmu_zap_page(vcpu->kvm, sp);
Avi Kivity4cee5762007-11-18 16:37:07 +02001925 ++vcpu->kvm->stat.mmu_recycled;
Avi Kivityebeace82007-01-05 16:36:47 -08001926 }
1927}
Avi Kivityebeace82007-01-05 16:36:47 -08001928
Avi Kivity30677142007-10-28 18:48:59 +02001929int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1930{
1931 int r;
1932 enum emulation_result er;
1933
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001934 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
Avi Kivity30677142007-10-28 18:48:59 +02001935 if (r < 0)
1936 goto out;
1937
1938 if (!r) {
1939 r = 1;
1940 goto out;
1941 }
1942
Avi Kivityb733bfb2007-10-28 18:52:05 +02001943 r = mmu_topup_memory_caches(vcpu);
1944 if (r)
1945 goto out;
1946
Avi Kivity30677142007-10-28 18:48:59 +02001947 er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
Avi Kivity30677142007-10-28 18:48:59 +02001948
1949 switch (er) {
1950 case EMULATE_DONE:
1951 return 1;
1952 case EMULATE_DO_MMIO:
1953 ++vcpu->stat.mmio_exits;
1954 return 0;
1955 case EMULATE_FAIL:
1956 kvm_report_emulation_failure(vcpu, "pagetable");
1957 return 1;
1958 default:
1959 BUG();
1960 }
1961out:
Avi Kivity30677142007-10-28 18:48:59 +02001962 return r;
1963}
1964EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
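
/*
 * Return-value contract of kvm_mmu_page_fault, as the vmx/svm exit
 * handlers interpret it: negative is an internal error to propagate,
 * 1 means the guest can be resumed (the fault was fixed, or emulation
 * completed), and 0 requests an exit to userspace, e.g. the
 * EMULATE_DO_MMIO case above.
 */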
1965
Joerg Roedel18552672008-02-07 13:47:41 +01001966void kvm_enable_tdp(void)
1967{
1968 tdp_enabled = true;
1969}
1970EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1971
Joerg Roedel5f4cb662008-07-14 20:36:36 +02001972void kvm_disable_tdp(void)
1973{
1974 tdp_enabled = false;
1975}
1976EXPORT_SYMBOL_GPL(kvm_disable_tdp);
1977
Avi Kivity6aa8b732006-12-10 02:21:36 -08001978static void free_mmu_pages(struct kvm_vcpu *vcpu)
1979{
Avi Kivity4db35312007-11-21 15:28:32 +02001980 struct kvm_mmu_page *sp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001981
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08001982 while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
1983 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
Avi Kivity4db35312007-11-21 15:28:32 +02001984 struct kvm_mmu_page, link);
1985 kvm_mmu_zap_page(vcpu->kvm, sp);
Avi Kivity8d2d73b2008-06-04 18:42:24 +03001986 cond_resched();
Avi Kivityf51234c2007-01-05 16:36:52 -08001987 }
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001988 free_page((unsigned long)vcpu->arch.mmu.pae_root);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001989}
1990
1991static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1992{
Avi Kivity17ac10a2007-01-05 16:36:40 -08001993 struct page *page;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001994 int i;
1995
1996 ASSERT(vcpu);
1997
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08001998 if (vcpu->kvm->arch.n_requested_mmu_pages)
1999 vcpu->kvm->arch.n_free_mmu_pages =
2000 vcpu->kvm->arch.n_requested_mmu_pages;
Izik Eidus82ce2c92007-10-02 18:52:55 +02002001 else
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002002 vcpu->kvm->arch.n_free_mmu_pages =
2003 vcpu->kvm->arch.n_alloc_mmu_pages;
Avi Kivity17ac10a2007-01-05 16:36:40 -08002004 /*
2005 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2006 * Therefore we need to allocate shadow page tables in the first
2007 * 4GB of memory, which happens to fit the DMA32 zone.
2008 */
2009 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2010 if (!page)
2011 goto error_1;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002012 vcpu->arch.mmu.pae_root = page_address(page);
Avi Kivity17ac10a2007-01-05 16:36:40 -08002013 for (i = 0; i < 4; ++i)
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002014 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
Avi Kivity17ac10a2007-01-05 16:36:40 -08002015
Avi Kivity6aa8b732006-12-10 02:21:36 -08002016 return 0;
2017
2018error_1:
2019 free_mmu_pages(vcpu);
2020 return -ENOMEM;
2021}
2022
Ingo Molnar8018c272006-12-29 16:50:01 -08002023int kvm_mmu_create(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002024{
Avi Kivity6aa8b732006-12-10 02:21:36 -08002025 ASSERT(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002026 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
Avi Kivity6aa8b732006-12-10 02:21:36 -08002027
Ingo Molnar8018c272006-12-29 16:50:01 -08002028 return alloc_mmu_pages(vcpu);
2029}
Avi Kivity6aa8b732006-12-10 02:21:36 -08002030
Ingo Molnar8018c272006-12-29 16:50:01 -08002031int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2032{
2033 ASSERT(vcpu);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002034 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
Avi Kivity2c264952006-12-22 01:05:28 -08002035
Ingo Molnar8018c272006-12-29 16:50:01 -08002036 return init_kvm_mmu(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002037}
2038
2039void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2040{
2041 ASSERT(vcpu);
2042
2043 destroy_kvm_mmu(vcpu);
2044 free_mmu_pages(vcpu);
Avi Kivity714b93d2007-01-05 16:36:53 -08002045 mmu_free_memory_caches(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002046}
2047
Avi Kivity90cb0522007-07-17 13:04:56 +03002048void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002049{
Avi Kivity4db35312007-11-21 15:28:32 +02002050 struct kvm_mmu_page *sp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002051
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002052 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08002053 int i;
2054 u64 *pt;
2055
Avi Kivity4db35312007-11-21 15:28:32 +02002056 if (!test_bit(slot, &sp->slot_bitmap))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002057 continue;
2058
Avi Kivity4db35312007-11-21 15:28:32 +02002059 pt = sp->spt;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002060 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2061 /* avoid RMW */
Izik Eidus9647c142007-10-16 14:43:46 +02002062 if (pt[i] & PT_WRITABLE_MASK)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002063 pt[i] &= ~PT_WRITABLE_MASK;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002064 }
2065}
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002066
Avi Kivity90cb0522007-07-17 13:04:56 +03002067void kvm_mmu_zap_all(struct kvm *kvm)
Dor Laore0fa8262007-03-30 13:06:33 +03002068{
Avi Kivity4db35312007-11-21 15:28:32 +02002069 struct kvm_mmu_page *sp, *node;
Dor Laore0fa8262007-03-30 13:06:33 +03002070
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05002071 spin_lock(&kvm->mmu_lock);
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002072 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
Avi Kivity4db35312007-11-21 15:28:32 +02002073 kvm_mmu_zap_page(kvm, sp);
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -05002074 spin_unlock(&kvm->mmu_lock);
Dor Laore0fa8262007-03-30 13:06:33 +03002075
Avi Kivity90cb0522007-07-17 13:04:56 +03002076 kvm_flush_remote_tlbs(kvm);
Dor Laore0fa8262007-03-30 13:06:33 +03002077}
2078
Harvey Harrison8b2cf732008-04-27 12:14:13 -07002079static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
Izik Eidus3ee16c82008-03-30 15:17:21 +03002080{
2081 struct kvm_mmu_page *page;
2082
2083 page = container_of(kvm->arch.active_mmu_pages.prev,
2084 struct kvm_mmu_page, link);
2085 kvm_mmu_zap_page(kvm, page);
2086}
2087
2088static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2089{
2090 struct kvm *kvm;
2091 struct kvm *kvm_freed = NULL;
2092 int cache_count = 0;
2093
2094 spin_lock(&kvm_lock);
2095
2096 list_for_each_entry(kvm, &vm_list, vm_list) {
2097 int npages;
2098
Marcelo Tosatti5a4c9282008-07-03 18:33:02 -03002099 if (!down_read_trylock(&kvm->slots_lock))
2100 continue;
Izik Eidus3ee16c82008-03-30 15:17:21 +03002101 spin_lock(&kvm->mmu_lock);
2102 npages = kvm->arch.n_alloc_mmu_pages -
2103 kvm->arch.n_free_mmu_pages;
2104 cache_count += npages;
2105 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2106 kvm_mmu_remove_one_alloc_mmu_page(kvm);
2107 cache_count--;
2108 kvm_freed = kvm;
2109 }
2110 nr_to_scan--;
2111
2112 spin_unlock(&kvm->mmu_lock);
Marcelo Tosatti5a4c9282008-07-03 18:33:02 -03002113 up_read(&kvm->slots_lock);
Izik Eidus3ee16c82008-03-30 15:17:21 +03002114 }
2115 if (kvm_freed)
2116 list_move_tail(&kvm_freed->vm_list, &vm_list);
2117
2118 spin_unlock(&kvm_lock);
2119
2120 return cache_count;
2121}
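
/*
 * Illustrative accounting for the shrinker above (hypothetical
 * numbers): three VMs holding 10, 0 and 5 in-use shadow pages zap one
 * page from the first VM that has any, return a cache_count of
 * 10 + 0 + 5 - 1 = 14, and rotate the shrunk VM to the tail of
 * vm_list so reclaim pressure is spread around.
 */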
2122
2123static struct shrinker mmu_shrinker = {
2124 .shrink = mmu_shrink,
2125 .seeks = DEFAULT_SEEKS * 10,
2126};
2127
Ingo Molnar2ddfd202008-05-22 10:37:48 +02002128static void mmu_destroy_caches(void)
Avi Kivityb5a33a72007-04-15 16:31:09 +03002129{
2130 if (pte_chain_cache)
2131 kmem_cache_destroy(pte_chain_cache);
2132 if (rmap_desc_cache)
2133 kmem_cache_destroy(rmap_desc_cache);
Avi Kivityd3d25b02007-05-30 12:34:53 +03002134 if (mmu_page_header_cache)
2135 kmem_cache_destroy(mmu_page_header_cache);
Avi Kivityb5a33a72007-04-15 16:31:09 +03002136}
2137
Izik Eidus3ee16c82008-03-30 15:17:21 +03002138void kvm_mmu_module_exit(void)
2139{
2140 mmu_destroy_caches();
2141 unregister_shrinker(&mmu_shrinker);
2142}
2143
Avi Kivityb5a33a72007-04-15 16:31:09 +03002144int kvm_mmu_module_init(void)
2145{
2146 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2147 sizeof(struct kvm_pte_chain),
Paul Mundt20c2df82007-07-20 10:11:58 +09002148 0, 0, NULL);
Avi Kivityb5a33a72007-04-15 16:31:09 +03002149 if (!pte_chain_cache)
2150 goto nomem;
2151 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2152 sizeof(struct kvm_rmap_desc),
Paul Mundt20c2df82007-07-20 10:11:58 +09002153 0, 0, NULL);
Avi Kivityb5a33a72007-04-15 16:31:09 +03002154 if (!rmap_desc_cache)
2155 goto nomem;
2156
Avi Kivityd3d25b02007-05-30 12:34:53 +03002157 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2158 sizeof(struct kvm_mmu_page),
Paul Mundt20c2df82007-07-20 10:11:58 +09002159 0, 0, NULL);
Avi Kivityd3d25b02007-05-30 12:34:53 +03002160 if (!mmu_page_header_cache)
2161 goto nomem;
2162
Izik Eidus3ee16c82008-03-30 15:17:21 +03002163 register_shrinker(&mmu_shrinker);
2164
Avi Kivityb5a33a72007-04-15 16:31:09 +03002165 return 0;
2166
2167nomem:
Izik Eidus3ee16c82008-03-30 15:17:21 +03002168 mmu_destroy_caches();
Avi Kivityb5a33a72007-04-15 16:31:09 +03002169 return -ENOMEM;
2170}
2171
Zhang Xiantao3ad82a72007-11-20 13:11:38 +08002172/*
2173 * Calculate the number of mmu pages needed for kvm.
2174 */
2175unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2176{
2177 int i;
2178 unsigned int nr_mmu_pages;
2179 unsigned int nr_pages = 0;
2180
2181 for (i = 0; i < kvm->nmemslots; i++)
2182 nr_pages += kvm->memslots[i].npages;
2183
2184 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2185 nr_mmu_pages = max(nr_mmu_pages,
2186 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2187
2188 return nr_mmu_pages;
2189}
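
/*
 * Quick arithmetic check, assuming KVM_PERMILLE_MMU_PAGES is 20
 * (i.e. shadow pages for 2% of guest memory): a 1GB guest spans
 * 262144 4K pages, so nr_mmu_pages = 262144 * 20 / 1000 = 5242 shadow
 * pages, roughly 20MB of spt pages; smaller guests are rounded up to
 * KVM_MIN_ALLOC_MMU_PAGES.
 */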
2190
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05002191static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2192 unsigned len)
2193{
2194 if (len > buffer->len)
2195 return NULL;
2196 return buffer->ptr;
2197}
2198
2199static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2200 unsigned len)
2201{
2202 void *ret;
2203
2204 ret = pv_mmu_peek_buffer(buffer, len);
2205 if (!ret)
2206 return ret;
2207 buffer->ptr += len;
2208 buffer->len -= len;
2209 buffer->processed += len;
2210 return ret;
2211}
2212
2213static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2214 gpa_t addr, gpa_t value)
2215{
2216 int bytes = 8;
2217 int r;
2218
2219 if (!is_long_mode(vcpu) && !is_pae(vcpu))
2220 bytes = 4;
2221
2222 r = mmu_topup_memory_caches(vcpu);
2223 if (r)
2224 return r;
2225
Marcelo Tosatti3200f402008-03-29 20:17:59 -03002226 if (!emulator_write_phys(vcpu, addr, &value, bytes))
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05002227 return -EFAULT;
2228
2229 return 1;
2230}
2231
2232static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2233{
2234 kvm_x86_ops->tlb_flush(vcpu);
2235 return 1;
2236}
2237
2238static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2239{
2240 spin_lock(&vcpu->kvm->mmu_lock);
2241 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2242 spin_unlock(&vcpu->kvm->mmu_lock);
2243 return 1;
2244}
2245
2246static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2247 struct kvm_pv_mmu_op_buffer *buffer)
2248{
2249 struct kvm_mmu_op_header *header;
2250
2251 header = pv_mmu_peek_buffer(buffer, sizeof *header);
2252 if (!header)
2253 return 0;
2254 switch (header->op) {
2255 case KVM_MMU_OP_WRITE_PTE: {
2256 struct kvm_mmu_op_write_pte *wpte;
2257
2258 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2259 if (!wpte)
2260 return 0;
2261 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2262 wpte->pte_val);
2263 }
2264 case KVM_MMU_OP_FLUSH_TLB: {
2265 struct kvm_mmu_op_flush_tlb *ftlb;
2266
2267 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2268 if (!ftlb)
2269 return 0;
2270 return kvm_pv_mmu_flush_tlb(vcpu);
2271 }
2272 case KVM_MMU_OP_RELEASE_PT: {
2273 struct kvm_mmu_op_release_pt *rpt;
2274
2275 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2276 if (!rpt)
2277 return 0;
2278 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2279 }
2280 default: return 0;
2281 }
2282}
2283
2284int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2285 gpa_t addr, unsigned long *ret)
2286{
2287 int r;
Dave Hansen6ad18fb2008-08-11 10:01:49 -07002288 struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05002289
Dave Hansen6ad18fb2008-08-11 10:01:49 -07002290 buffer->ptr = buffer->buf;
2291 buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
2292 buffer->processed = 0;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05002293
Dave Hansen6ad18fb2008-08-11 10:01:49 -07002294 r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05002295 if (r)
2296 goto out;
2297
Dave Hansen6ad18fb2008-08-11 10:01:49 -07002298 while (buffer->len) {
2299 r = kvm_pv_mmu_op_one(vcpu, buffer);
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05002300 if (r < 0)
2301 goto out;
2302 if (r == 0)
2303 break;
2304 }
2305
2306 r = 1;
2307out:
Dave Hansen6ad18fb2008-08-11 10:01:49 -07002308 *ret = buffer->processed;
Marcelo Tosatti2f333bc2008-02-22 12:21:37 -05002309 return r;
2310}
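
/*
 * Guest side of this protocol, sketched under the assumption that the
 * buffer layout matches kvm_para.h of this era: the guest packs one
 * or more ops into a physically contiguous buffer and passes
 * (bytes, buffer_pa) through the KVM_HC_MMU_OP hypercall, resuming
 * from the returned processed count until everything is consumed:
 *
 *	struct kvm_mmu_op_write_pte wpte = {
 *		.header.op = KVM_MMU_OP_WRITE_PTE,
 *		.pte_phys  = pte_pa,
 *		.pte_val   = new_pte,
 *	};
 *	kvm_hypercall3(KVM_HC_MMU_OP, sizeof(wpte), __pa(&wpte), 0);
 */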
2311
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002312#ifdef AUDIT
2313
2314static const char *audit_msg;
2315
2316static gva_t canonicalize(gva_t gva)
2317{
2318#ifdef CONFIG_X86_64
2319 gva = (long long)(gva << 16) >> 16;
2320#endif
2321 return gva;
2322}
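
/*
 * Example of the sign extension above: x86_64 virtual addresses are
 * 48 bits, so bit 47 must be replicated upward to be canonical. For
 * gva = 0x0000800000000000, gva << 16 gives 0x8000000000000000 and
 * the arithmetic shift back down yields 0xffff800000000000.
 */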
2323
2324static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2325 gva_t va, int level)
2326{
2327 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2328 int i;
2329 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2330
2331 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2332 u64 ent = pt[i];
2333
Avi Kivityc7addb92007-09-16 18:58:32 +02002334 if (ent == shadow_trap_nonpresent_pte)
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002335 continue;
2336
2337 va = canonicalize(va);
Avi Kivityc7addb92007-09-16 18:58:32 +02002338 if (level > 1) {
2339 if (ent == shadow_notrap_nonpresent_pte)
2340 printk(KERN_ERR "audit: (%s) nontrapping pte"
2341 " in nonleaf level: levels %d gva %lx"
2342 " level %d pte %llx\n", audit_msg,
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002343 vcpu->arch.mmu.root_level, va, level, ent);
Avi Kivityc7addb92007-09-16 18:58:32 +02002344
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002345 audit_mappings_page(vcpu, ent, va, level - 1);
Avi Kivityc7addb92007-09-16 18:58:32 +02002346 } else {
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002347 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
Anthony Liguori35149e22008-04-02 14:46:56 -05002348 pfn_t pfn = gpa_to_pfn(vcpu, gpa);
			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002349
Avi Kivityc7addb92007-09-16 18:58:32 +02002350 if (is_shadow_present_pte(ent)
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002351 && (ent & PT64_BASE_ADDR_MASK) != hpa)
Avi Kivityc7addb92007-09-16 18:58:32 +02002352 printk(KERN_ERR "xx audit error: (%s) levels %d"
2353 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002354 audit_msg, vcpu->arch.mmu.root_level,
Mike Dayd77c26f2007-10-08 09:02:08 -04002355 va, gpa, hpa, ent,
2356 is_shadow_present_pte(ent));
Avi Kivityc7addb92007-09-16 18:58:32 +02002357 else if (ent == shadow_notrap_nonpresent_pte
2358 && !is_error_hpa(hpa))
2359 printk(KERN_ERR "audit: (%s) notrap shadow,"
2360 " valid guest gva %lx\n", audit_msg, va);
Anthony Liguori35149e22008-04-02 14:46:56 -05002361 kvm_release_pfn_clean(pfn);
Avi Kivityc7addb92007-09-16 18:58:32 +02002362
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002363 }
2364 }
2365}
2366
2367static void audit_mappings(struct kvm_vcpu *vcpu)
2368{
Avi Kivity1ea252a2007-03-08 11:48:09 +02002369 unsigned i;
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002370
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002371 if (vcpu->arch.mmu.root_level == 4)
2372 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002373 else
2374 for (i = 0; i < 4; ++i)
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002375 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002376 audit_mappings_page(vcpu,
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002377 vcpu->arch.mmu.pae_root[i],
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002378 i << 30,
2379 2);
2380}
2381
2382static int count_rmaps(struct kvm_vcpu *vcpu)
2383{
2384 int nmaps = 0;
2385 int i, j, k;
2386
2387 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2388 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2389 struct kvm_rmap_desc *d;
2390
2391 for (j = 0; j < m->npages; ++j) {
Izik Eidus290fc382007-09-27 14:11:22 +02002392 unsigned long *rmapp = &m->rmap[j];
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002393
Izik Eidus290fc382007-09-27 14:11:22 +02002394 if (!*rmapp)
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002395 continue;
Izik Eidus290fc382007-09-27 14:11:22 +02002396 if (!(*rmapp & 1)) {
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002397 ++nmaps;
2398 continue;
2399 }
Izik Eidus290fc382007-09-27 14:11:22 +02002400 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002401 while (d) {
2402 for (k = 0; k < RMAP_EXT; ++k)
2403 if (d->shadow_ptes[k])
2404 ++nmaps;
2405 else
2406 break;
2407 d = d->more;
2408 }
2409 }
2410 }
2411 return nmaps;
2412}
2413
2414static int count_writable_mappings(struct kvm_vcpu *vcpu)
2415{
2416 int nmaps = 0;
Avi Kivity4db35312007-11-21 15:28:32 +02002417 struct kvm_mmu_page *sp;
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002418 int i;
2419
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002420 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
Avi Kivity4db35312007-11-21 15:28:32 +02002421 u64 *pt = sp->spt;
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002422
Avi Kivity4db35312007-11-21 15:28:32 +02002423 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002424 continue;
2425
2426 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2427 u64 ent = pt[i];
2428
2429 if (!(ent & PT_PRESENT_MASK))
2430 continue;
2431 if (!(ent & PT_WRITABLE_MASK))
2432 continue;
2433 ++nmaps;
2434 }
2435 }
2436 return nmaps;
2437}
2438
2439static void audit_rmap(struct kvm_vcpu *vcpu)
2440{
2441 int n_rmap = count_rmaps(vcpu);
2442 int n_actual = count_writable_mappings(vcpu);
2443
2444 if (n_rmap != n_actual)
2445 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08002446 __func__, audit_msg, n_rmap, n_actual);
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002447}
2448
2449static void audit_write_protection(struct kvm_vcpu *vcpu)
2450{
Avi Kivity4db35312007-11-21 15:28:32 +02002451 struct kvm_mmu_page *sp;
Izik Eidus290fc382007-09-27 14:11:22 +02002452 struct kvm_memory_slot *slot;
2453 unsigned long *rmapp;
2454 gfn_t gfn;
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002455
Zhang Xiantaof05e70a2007-12-14 10:01:48 +08002456 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
Avi Kivity4db35312007-11-21 15:28:32 +02002457 if (sp->role.metaphysical)
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002458 continue;
2459
Avi Kivity4db35312007-11-21 15:28:32 +02002460 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2461 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
Izik Eidus290fc382007-09-27 14:11:22 +02002462 rmapp = &slot->rmap[gfn - slot->base_gfn];
2463 if (*rmapp)
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002464 printk(KERN_ERR "%s: (%s) shadow page has writable"
2465 " mappings: gfn %lx role %x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08002466 __func__, audit_msg, sp->gfn,
Avi Kivity4db35312007-11-21 15:28:32 +02002467 sp->role.word);
Avi Kivity37a7d8b2007-01-05 16:36:56 -08002468 }
2469}
2470
2471static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2472{
2473 int olddbg = dbg;
2474
2475 dbg = 0;
2476 audit_msg = msg;
2477 audit_rmap(vcpu);
2478 audit_write_protection(vcpu);
2479 audit_mappings(vcpu);
2480 dbg = olddbg;
2481}
2482
2483#endif