/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * When set to true, this variable enables two-dimensional paging (TDP),
 * where the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports TDP, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

char *audit_point_name[] = {
	"pre page fault",
	"post page fault",
	"pre pte write",
	"post pte write",
	"pre sync",
	"post sync"
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#ifdef MMU_DEBUG
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PTE_PREFETCH_NUM		8

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
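
/*
 * Worked example with 4KB pages (PAGE_SHIFT == 12): each 64-bit level
 * indexes with 9 bits, so PT64_LEVEL_SHIFT(1) == 12, PT64_LEVEL_SHIFT(2)
 * == 21, PT64_LEVEL_SHIFT(3) == 30 and PT64_LEVEL_SHIFT(4) == 39, and
 * PT64_INDEX() extracts the matching 9-bit table index from an address.
 */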

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PTE_LIST_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	int level;
	u64 *sptep;
	unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
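
/*
 * For example, rsvd_bits(52, 62) evaluates to a mask with bits 52..62 set,
 * i.e. ((1ULL << 11) - 1) << 52.
 */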

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
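
/*
 * Illustrative sketch only (the vendor module supplies the authoritative
 * masks): an EPT-style configuration with no hardware accessed/dirty bits
 * and an executable bit instead of NX might be set up as
 *	kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK);
 */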

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	set_64bit(sptep, spte);
}

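/*
 * Atomically exchange an spte, returning the old value.  64-bit hosts can
 * use xchg() directly; 32-bit hosts have no atomic 64-bit xchg and fall
 * back to a cmpxchg64() loop.
 */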
static u64 __xchg_spte(u64 *sptep, u64 new_spte)
{
#ifdef CONFIG_X86_64
	return xchg(sptep, new_spte);
#else
	u64 old_spte;

	do {
		old_spte = *sptep;
	} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);

	return old_spte;
#endif
}

static bool spte_has_volatile_bits(u64 spte)
{
	if (!shadow_accessed_mask)
		return false;

	if (!is_shadow_present_pte(spte))
		return false;

	if ((spte & shadow_accessed_mask) &&
	      (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
		return false;

	return true;
}

static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
{
	return (old_spte & bit_mask) && !(new_spte & bit_mask);
}

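/*
 * Update an spte without losing accessed/dirty bits that the hardware may
 * set concurrently: if the old spte carries volatile bits, exchange it
 * atomically, then transfer any bits the update cleared to the backing
 * page via kvm_set_pfn_accessed()/kvm_set_pfn_dirty().
 */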
static void update_spte(u64 *sptep, u64 new_spte)
{
	u64 mask, old_spte = *sptep;

	WARN_ON(!is_rmap_spte(new_spte));

	new_spte |= old_spte & shadow_dirty_mask;

	mask = shadow_accessed_mask;
	if (is_writable_pte(old_spte))
		mask |= shadow_dirty_mask;

	if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
		__set_spte(sptep, new_spte);
	else
		old_spte = __xchg_spte(sptep, new_spte);

	if (!shadow_accessed_mask)
		return;

	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				  struct kmem_cache *cache)
{
	while (mc->nobjs)
		kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	void *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

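/*
 * Fault paths call this before taking mmu_lock: the topup helpers above
 * allocate with GFP_KERNEL, which may sleep and therefore cannot run under
 * the mmu spinlock, so objects are reserved here and later handed out
 * atomically by mmu_memory_cache_alloc().
 */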
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				pte_list_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache,
				      sizeof(struct pte_list_desc));
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

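/*
 * Direct shadow pages map a contiguous guest-physical range, so the gfn of
 * any entry can be computed from sp->gfn and the entry's index; only
 * indirect (shadowed) pages need the per-entry sp->gfns array.
 */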
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (sp->role.direct)
		BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
	else
		sp->gfns[index] = gfn;
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
{
	unsigned long idx;

	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
	return &slot->lpage_info[level - 2][idx];
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;
	int i;

	slot = gfn_to_memslot(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->write_count += 1;
	}
	kvm->arch.indirect_shadow_pages++;
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;
	int i;

	slot = gfn_to_memslot(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->write_count -= 1;
		WARN_ON(linfo->write_count < 0);
	}
	kvm->arch.indirect_shadow_pages--;
}

static int has_wrprotected_page(struct kvm *kvm,
				gfn_t gfn,
				int level)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;

	slot = gfn_to_memslot(kvm, gfn);
	if (slot) {
		linfo = lpage_info_slot(gfn, slot, level);
		return linfo->write_count;
	}

	return 1;
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long page_size;
	int i, ret = 0;

	page_size = kvm_host_page_size(kvm, gfn);

	for (i = PT_PAGE_TABLE_LEVEL;
	     i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
			ret = i;
		else
			break;
	}

	return ret;
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(vcpu->kvm, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
	      (no_dirty_log && slot->dirty_bitmap))
		slot = NULL;

	return slot;
}

static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	int host_level, level, max_level;

	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;

	max_level = kvm_x86_ops->get_lpage_level() < host_level ?
		kvm_x86_ops->get_lpage_level() : host_level;

	for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
			break;

	return level - 1;
}

/*
 * Pte mapping structures:
 *
 * If pte_list bit zero is zero, then pte_list points to the spte.
 *
 * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
 * pte_list_desc containing more mappings.
 *
 * Returns the number of pte entries before the spte was added or zero if
 * the spte was not added.
 *
 */
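/*
 * For example:
 *   no mappings:  *pte_list == 0
 *   one mapping:  *pte_list == (unsigned long)spte        (bit zero clear)
 *   many:         *pte_list == (unsigned long)desc | 1    (bit zero set)
 */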
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
			unsigned long *pte_list)
{
	struct pte_list_desc *desc;
	int i, count = 0;

	if (!*pte_list) {
		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
		*pte_list = (unsigned long)spte;
	} else if (!(*pte_list & 1)) {
		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_pte_list_desc(vcpu);
		desc->sptes[0] = (u64 *)*pte_list;
		desc->sptes[1] = spte;
		*pte_list = (unsigned long)desc | 1;
		++count;
	} else {
		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
			desc = desc->more;
			count += PTE_LIST_EXT;
		}
		if (desc->sptes[PTE_LIST_EXT-1]) {
			desc->more = mmu_alloc_pte_list_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			++count;
		desc->sptes[i] = spte;
	}
	return count;
}

static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
{
	struct pte_list_desc *desc;
	u64 *prev_spte;
	int i;

	if (!*pte_list)
		return NULL;
	else if (!(*pte_list & 1)) {
		if (!spte)
			return (u64 *)*pte_list;
		return NULL;
	}
	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->sptes[i];
			prev_spte = desc->sptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void
pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
			   int i, struct pte_list_desc *prev_desc)
{
	int j;

	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*pte_list = (unsigned long)desc->sptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*pte_list = (unsigned long)desc->more | 1;
	mmu_free_pte_list_desc(desc);
}

static void pte_list_remove(u64 *spte, unsigned long *pte_list)
{
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
	int i;

	if (!*pte_list) {
		printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
		BUG();
	} else if (!(*pte_list & 1)) {
		rmap_printk("pte_list_remove: %p 1->0\n", spte);
		if ((u64 *)*pte_list != spte) {
			printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
			BUG();
		}
		*pte_list = 0;
	} else {
		rmap_printk("pte_list_remove: %p many->many\n", spte);
		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
				if (desc->sptes[i] == spte) {
					pte_list_desc_remove_entry(pte_list,
							desc, i,
							prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("pte_list_remove: %p many->many\n", spte);
		BUG();
	}
}

typedef void (*pte_list_walk_fn) (u64 *spte);
static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
{
	struct pte_list_desc *desc;
	int i;

	if (!*pte_list)
		return;

	if (!(*pte_list & 1))
		return fn((u64 *)*pte_list);

	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	while (desc) {
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
			fn(desc->sptes[i]);
		desc = desc->more;
	}
}

/*
 * Take a gfn and return the reverse mapping to it.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;

	slot = gfn_to_memslot(kvm, gfn);
	if (likely(level == PT_PAGE_TABLE_LEVEL))
		return &slot->rmap[gfn - slot->base_gfn];

	linfo = lpage_info_slot(gfn, slot, level);

	return &linfo->rmap_pde;
}

static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	unsigned long *rmapp;

	sp = page_header(__pa(spte));
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
	return pte_list_add(vcpu, spte, rmapp);
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	return pte_list_next(rmapp, spte);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	unsigned long *rmapp;

	sp = page_header(__pa(spte));
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
	pte_list_remove(spte, rmapp);
}

static int set_spte_track_bits(u64 *sptep, u64 new_spte)
{
	pfn_t pfn;
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__set_spte(sptep, new_spte);
	else
		old_spte = __xchg_spte(sptep, new_spte);

	if (!is_rmap_spte(old_spte))
		return 0;

	pfn = spte_to_pfn(old_spte);
	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
		kvm_set_pfn_dirty(pfn);
	return 1;
}

static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
	if (set_spte_track_bits(sptep, new_spte))
		rmap_remove(kvm, sptep);
}

static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int i, write_protected = 0;

	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writable_pte(*spte)) {
			update_spte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

	/* check for huge page mappings */
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		rmapp = gfn_to_rmap(kvm, gfn, i);
		spte = rmap_next(kvm, rmapp, NULL);
		while (spte) {
			BUG_ON(!spte);
			BUG_ON(!(*spte & PT_PRESENT_MASK));
			BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
			if (is_writable_pte(*spte)) {
				drop_spte(kvm, spte,
					  shadow_trap_nonpresent_pte);
				--kvm->stat.lpages;
				spte = NULL;
				write_protected = 1;
			}
			spte = rmap_next(kvm, rmapp, spte);
		}
	}

	return write_protected;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long data)
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}

static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
			     unsigned long data)
{
	int need_flush = 0;
	u64 *spte, new_spte;
	pte_t *ptep = (pte_t *)data;
	pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!is_shadow_present_pte(*spte));
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
		need_flush = 1;
		if (pte_write(*ptep)) {
			drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
			spte = rmap_next(kvm, rmapp, NULL);
		} else {
			new_spte = *spte & ~(PT64_BASE_ADDR_MASK);
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;
			new_spte &= ~shadow_accessed_mask;
			set_spte_track_bits(spte, new_spte);
			spte = rmap_next(kvm, rmapp, spte);
		}
	}
	if (need_flush)
		kvm_flush_remote_tlbs(kvm);

	return 0;
}

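/*
 * Apply a handler to every rmap chain that can map the given host virtual
 * address: for each memslot containing hva, the handler runs on the 4K
 * rmap and then on the rmap of every supported huge page size.
 */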
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long data))
{
	int i, j;
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
			gfn_t gfn = memslot->base_gfn + gfn_offset;

			ret = handler(kvm, &memslot->rmap[gfn_offset], data);

			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
				struct kvm_lpage_info *linfo;

				linfo = lpage_info_slot(gfn, memslot,
							PT_DIRECTORY_LEVEL + j);
				ret |= handler(kvm, &linfo->rmap_pde, data);
			}
			trace_kvm_age_page(hva, memslot, ret);
			retval |= ret;
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long data)
{
	u64 *spte;
	int young = 0;

	/*
	 * Emulate the accessed bit for EPT, by checking if this page has
	 * an EPT mapping, and clearing it if it does. On the next access,
	 * a new EPT mapping will be established.
	 * This has some overhead, but not as much as the cost of swapping
	 * out actively used pages or breaking up actively used hugepages.
	 */
	if (!shadow_accessed_mask)
		return kvm_unmap_rmapp(kvm, rmapp, data);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long data)
{
	u64 *spte;
	int young = 0;

	/*
	 * If there's no access bit in the secondary pte set by the
	 * hardware, it's up to gup-fast/gup to set the access bit in
	 * the primary pte or in the page structure.
	 */
| 971 | if (!shadow_accessed_mask) |
| 972 | goto out; |
| 973 | |
| 974 | spte = rmap_next(kvm, rmapp, NULL); |
| 975 | while (spte) { |
| 976 | u64 _spte = *spte; |
| 977 | BUG_ON(!(_spte & PT_PRESENT_MASK)); |
| 978 | young = _spte & PT_ACCESSED_MASK; |
| 979 | if (young) { |
| 980 | young = 1; |
| 981 | break; |
| 982 | } |
| 983 | spte = rmap_next(kvm, rmapp, spte); |
| 984 | } |
| 985 | out: |
| 986 | return young; |
| 987 | } |
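| | /*
| |  * Unlike kvm_age_rmapp() above, this variant only reports the accessed
| |  * bit and never clears it, so callers can probe whether a page is young
| |  * without resetting the aging state.
| |  */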
| 988 | |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 989 | #define RMAP_RECYCLE_THRESHOLD 1000 |
| 990 | |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 991 | static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 992 | { |
| 993 | unsigned long *rmapp; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 994 | struct kvm_mmu_page *sp; |
| 995 | |
| 996 | sp = page_header(__pa(spte)); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 997 | |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 998 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 999 | |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1000 | kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 1001 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1002 | } |
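| | /*
| |  * Hedged usage sketch: the expected caller is the rmap insertion path in
| |  * this file, checking the chain length it has just grown, e.g.:
| |  *
| |  *   if (rmap_count > RMAP_RECYCLE_THRESHOLD)
| |  *           rmap_recycle(vcpu, spte, gfn);
| |  *
| |  * Zapping the whole chain bounds the cost of later rmap walks; the
| |  * dropped sptes are simply re-established by subsequent page faults.
| |  */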
| 1003 | |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1004 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) |
| 1005 | { |
Izik Eidus | 3da0dd4 | 2009-09-23 21:47:18 +0300 | [diff] [blame] | 1006 | return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 1007 | } |
| 1008 | |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1009 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) |
| 1010 | { |
| 1011 | return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); |
| 1012 | } |
| 1013 | |
Yaozu Dong | d6c69ee | 2007-04-25 14:17:25 +0800 | [diff] [blame] | 1014 | #ifdef MMU_DEBUG |
Avi Kivity | 47ad8e6 | 2007-05-06 15:50:58 +0300 | [diff] [blame] | 1015 | static int is_empty_shadow_page(u64 *spt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1016 | { |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1017 | u64 *pos; |
| 1018 | u64 *end; |
| 1019 | |
Avi Kivity | 47ad8e6 | 2007-05-06 15:50:58 +0300 | [diff] [blame] | 1020 | for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) |
Avi Kivity | 3c91551 | 2008-05-20 16:21:13 +0300 | [diff] [blame] | 1021 | if (is_shadow_present_pte(*pos)) { |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 1022 | printk(KERN_ERR "%s: %p %llx\n", __func__, |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1023 | pos, *pos); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1024 | return 0; |
Avi Kivity | 139bdb2 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 1025 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1026 | return 1; |
| 1027 | } |
Yaozu Dong | d6c69ee | 2007-04-25 14:17:25 +0800 | [diff] [blame] | 1028 | #endif |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1029 | |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 1030 | /* |
| 1031 | * This value is the sum of all of the kvm instances'
| 1032 | * kvm->arch.n_used_mmu_pages values. We need a global,
| 1033 | * aggregate version in order to make the slab shrinker
| 1034 | * faster.
| 1035 | */ |
| 1036 | static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) |
| 1037 | { |
| 1038 | kvm->arch.n_used_mmu_pages += nr; |
| 1039 | percpu_counter_add(&kvm_total_used_mmu_pages, nr); |
| 1040 | } |
| 1041 | |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame^] | 1042 | static void kvm_mmu_free_page(struct kvm_mmu_page *sp) |
Avi Kivity | 260746c | 2007-01-05 16:36:49 -0800 | [diff] [blame] | 1043 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1044 | ASSERT(is_empty_shadow_page(sp->spt)); |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1045 | hlist_del(&sp->hash_link); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1046 | list_del(&sp->link); |
Xiao Guangrong | 842f22e | 2011-03-04 19:01:10 +0800 | [diff] [blame] | 1047 | free_page((unsigned long)sp->spt); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1048 | if (!sp->role.direct) |
Xiao Guangrong | 842f22e | 2011-03-04 19:01:10 +0800 | [diff] [blame] | 1049 | free_page((unsigned long)sp->gfns); |
Xiao Guangrong | e8ad9a7 | 2010-05-13 10:06:02 +0800 | [diff] [blame] | 1050 | kmem_cache_free(mmu_page_header_cache, sp); |
Avi Kivity | 260746c | 2007-01-05 16:36:49 -0800 | [diff] [blame] | 1051 | } |
| 1052 | |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1053 | static unsigned kvm_page_table_hashfn(gfn_t gfn) |
| 1054 | { |
Dong, Eddie | 1ae0a13 | 2008-01-07 13:20:25 +0200 | [diff] [blame] | 1055 | return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1056 | } |
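| | /*
| |  * Worked example (assuming KVM_MMU_HASH_SHIFT == 10): the hash is just
| |  * the low bits of the gfn, so gfn 0x12345 lands in bucket
| |  * 0x12345 & 0x3ff == 0x345 of kvm->arch.mmu_page_hash[]. Shadow pages
| |  * for the same gfn but different roles share a bucket and are
| |  * disambiguated by role.word at lookup time.
| |  */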
| 1057 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1058 | static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, |
| 1059 | struct kvm_mmu_page *sp, u64 *parent_pte) |
| 1060 | { |
| 1061 | if (!parent_pte) |
| 1062 | return; |
| 1063 | |
| 1064 | pte_list_add(vcpu, parent_pte, &sp->parent_ptes); |
| 1065 | } |
| 1066 | |
| 1067 | static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, |
| 1068 | u64 *parent_pte) |
| 1069 | { |
| 1070 | pte_list_remove(parent_pte, &sp->parent_ptes); |
| 1071 | } |
| 1072 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 1073 | static void drop_parent_pte(struct kvm_mmu_page *sp, |
| 1074 | u64 *parent_pte) |
| 1075 | { |
| 1076 | mmu_page_remove_parent_pte(sp, parent_pte); |
| 1077 | __set_spte(parent_pte, shadow_trap_nonpresent_pte); |
| 1078 | } |
| 1079 | |
Avi Kivity | 25c0de2 | 2007-01-05 16:36:42 -0800 | [diff] [blame] | 1080 | static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1081 | u64 *parent_pte, int direct) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1082 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1083 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1084 | sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, |
| 1085 | sizeof *sp); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 1086 | sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1087 | if (!direct) |
| 1088 | sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, |
| 1089 | PAGE_SIZE); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1090 | set_page_private(virt_to_page(sp->spt), (unsigned long)sp); |
Zhang Xiantao | f05e70a | 2007-12-14 10:01:48 +0800 | [diff] [blame] | 1091 | list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); |
Sheng Yang | 291f26b | 2008-10-16 17:30:57 +0800 | [diff] [blame] | 1092 | bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1093 | sp->parent_ptes = 0; |
| 1094 | mmu_page_add_parent_pte(vcpu, sp, parent_pte); |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 1095 | kvm_mod_used_mmu_pages(vcpu->kvm, +1); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1096 | return sp; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1097 | } |
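| | /*
| |  * Note on the !direct case above: only indirect shadow pages need the
| |  * sp->gfns array, because a direct page maps a contiguous guest-physical
| |  * range and the gfn behind each spte can be recomputed from sp->gfn plus
| |  * the spte's index.
| |  */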
| 1098 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1099 | static void mark_unsync(u64 *spte); |
Xiao Guangrong | 6b18493 | 2010-04-16 21:29:17 +0800 | [diff] [blame] | 1100 | static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 1101 | { |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1102 | pte_list_walk(&sp->parent_ptes, mark_unsync); |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 1103 | } |
| 1104 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1105 | static void mark_unsync(u64 *spte) |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 1106 | { |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1107 | struct kvm_mmu_page *sp; |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 1108 | unsigned int index; |
| 1109 | |
Xiao Guangrong | 67052b3 | 2011-05-15 23:27:08 +0800 | [diff] [blame] | 1110 | sp = page_header(__pa(spte)); |
Xiao Guangrong | 1047df1 | 2010-06-11 21:35:15 +0800 | [diff] [blame] | 1111 | index = spte - sp->spt; |
| 1112 | if (__test_and_set_bit(index, sp->unsync_child_bitmap)) |
| 1113 | return; |
| 1114 | if (sp->unsync_children++) |
| 1115 | return; |
| 1116 | kvm_mmu_mark_parents_unsync(sp); |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 1117 | } |
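| | /*
| |  * Propagation sketch: mark_unsync() walks upward through parent_ptes.
| |  * The two early returns bound the work: if this child's bit was already
| |  * set, or the parent already had other unsync children, every ancestor
| |  * is already marked and the recursion stops.
| |  */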
| 1118 | |
Avi Kivity | d761a50 | 2008-05-29 14:55:03 +0300 | [diff] [blame] | 1119 | static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu, |
| 1120 | struct kvm_mmu_page *sp) |
| 1121 | { |
| 1122 | int i; |
| 1123 | |
| 1124 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
| 1125 | sp->spt[i] = shadow_trap_nonpresent_pte; |
| 1126 | } |
| 1127 | |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 1128 | static int nonpaging_sync_page(struct kvm_vcpu *vcpu, |
Xiao Guangrong | a4a8e6f | 2010-11-19 17:04:03 +0800 | [diff] [blame] | 1129 | struct kvm_mmu_page *sp) |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 1130 | { |
| 1131 | return 1; |
| 1132 | } |
| 1133 | |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 1134 | static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) |
| 1135 | { |
| 1136 | } |
| 1137 | |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 1138 | static void nonpaging_update_pte(struct kvm_vcpu *vcpu, |
| 1139 | struct kvm_mmu_page *sp, u64 *spte, |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 1140 | const void *pte) |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 1141 | { |
| 1142 | WARN_ON(1); |
| 1143 | } |
| 1144 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1145 | #define KVM_PAGE_ARRAY_NR 16 |
| 1146 | |
| 1147 | struct kvm_mmu_pages { |
| 1148 | struct mmu_page_and_offset { |
| 1149 | struct kvm_mmu_page *sp; |
| 1150 | unsigned int idx; |
| 1151 | } page[KVM_PAGE_ARRAY_NR]; |
| 1152 | unsigned int nr; |
| 1153 | }; |
| 1154 | |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 1155 | #define for_each_unsync_children(bitmap, idx) \ |
| 1156 | for (idx = find_first_bit(bitmap, 512); \ |
| 1157 | idx < 512; \ |
| 1158 | idx = find_next_bit(bitmap, 512, idx+1)) |
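| | /*
| |  * The hard-coded 512 is PT64_ENT_PER_PAGE (4096 / sizeof(u64)), i.e. the
| |  * width of sp->unsync_child_bitmap; the macro is a sparse iteration over
| |  * the slots that have unsync descendants.
| |  */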
| 1159 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 1160 | static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, |
| 1161 | int idx) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1162 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1163 | int i; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1164 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1165 | if (sp->unsync) |
| 1166 | for (i = 0; i < pvec->nr; i++)
| 1167 | if (pvec->page[i].sp == sp) |
| 1168 | return 0; |
| 1169 | |
| 1170 | pvec->page[pvec->nr].sp = sp; |
| 1171 | pvec->page[pvec->nr].idx = idx; |
| 1172 | pvec->nr++; |
| 1173 | return (pvec->nr == KVM_PAGE_ARRAY_NR); |
| 1174 | } |
| 1175 | |
| 1176 | static int __mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 1177 | struct kvm_mmu_pages *pvec) |
| 1178 | { |
| 1179 | int i, ret, nr_unsync_leaf = 0; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1180 | |
Marcelo Tosatti | 0074ff6 | 2008-09-23 13:18:40 -0300 | [diff] [blame] | 1181 | for_each_unsync_children(sp->unsync_child_bitmap, i) { |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1182 | struct kvm_mmu_page *child; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1183 | u64 ent = sp->spt[i]; |
| 1184 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1185 | if (!is_shadow_present_pte(ent) || is_large_pte(ent)) |
| 1186 | goto clear_child_bitmap; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1187 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1188 | child = page_header(ent & PT64_BASE_ADDR_MASK); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1189 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1190 | if (child->unsync_children) { |
| 1191 | if (mmu_pages_add(pvec, child, i)) |
| 1192 | return -ENOSPC; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1193 | |
Xiao Guangrong | 7a8f1a7 | 2010-06-11 21:34:04 +0800 | [diff] [blame] | 1194 | ret = __mmu_unsync_walk(child, pvec); |
| 1195 | if (!ret) |
| 1196 | goto clear_child_bitmap; |
| 1197 | else if (ret > 0) |
| 1198 | nr_unsync_leaf += ret; |
| 1199 | else |
| 1200 | return ret; |
| 1201 | } else if (child->unsync) { |
| 1202 | nr_unsync_leaf++; |
| 1203 | if (mmu_pages_add(pvec, child, i)) |
| 1204 | return -ENOSPC; |
| 1205 | } else |
| 1206 | goto clear_child_bitmap; |
| 1207 | |
| 1208 | continue; |
| 1209 | |
| 1210 | clear_child_bitmap: |
| 1211 | __clear_bit(i, sp->unsync_child_bitmap); |
| 1212 | sp->unsync_children--; |
| 1213 | WARN_ON((int)sp->unsync_children < 0); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1214 | } |
| 1215 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1216 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1217 | return nr_unsync_leaf; |
| 1218 | } |
| 1219 | |
| 1220 | static int mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 1221 | struct kvm_mmu_pages *pvec) |
| 1222 | { |
| 1223 | if (!sp->unsync_children) |
| 1224 | return 0; |
| 1225 | |
| 1226 | mmu_pages_add(pvec, sp, 0); |
| 1227 | return __mmu_unsync_walk(sp, pvec); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1228 | } |
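| | /*
| |  * Batching note: the walk fills pvec with at most KVM_PAGE_ARRAY_NR
| |  * entries and bails out with -ENOSPC when full, so callers drain it in a
| |  * loop (see mmu_sync_children() and mmu_zap_unsync_children() below)
| |  * until a walk returns 0.
| |  */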
| 1229 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1230 | static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1231 | { |
| 1232 | WARN_ON(!sp->unsync); |
Xiao Guangrong | 5e1b3dd | 2010-04-28 11:55:06 +0800 | [diff] [blame] | 1233 | trace_kvm_mmu_sync_page(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1234 | sp->unsync = 0; |
| 1235 | --kvm->stat.mmu_unsync; |
| 1236 | } |
| 1237 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1238 | static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 1239 | struct list_head *invalid_list); |
| 1240 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 1241 | struct list_head *invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1242 | |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1243 | #define for_each_gfn_sp(kvm, sp, gfn, pos) \ |
| 1244 | hlist_for_each_entry(sp, pos, \ |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1245 | &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \ |
| 1246 | if ((sp)->gfn != (gfn)) {} else |
| 1247 | |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1248 | #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \ |
| 1249 | hlist_for_each_entry(sp, pos, \ |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1250 | &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \ |
| 1251 | if ((sp)->gfn != (gfn) || (sp)->role.direct || \ |
| 1252 | (sp)->role.invalid) {} else |
| 1253 | |
Xiao Guangrong | f918b44 | 2010-06-11 21:30:36 +0800 | [diff] [blame] | 1254 | /* @sp->gfn should be write-protected at the call site */ |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1255 | static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1256 | struct list_head *invalid_list, bool clear_unsync) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1257 | { |
Avi Kivity | 5b7e010 | 2010-04-14 19:20:03 +0300 | [diff] [blame] | 1258 | if (sp->role.cr4_pae != !!is_pae(vcpu)) { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1259 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1260 | return 1; |
| 1261 | } |
| 1262 | |
Xiao Guangrong | f918b44 | 2010-06-11 21:30:36 +0800 | [diff] [blame] | 1263 | if (clear_unsync) |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1264 | kvm_unlink_unsync_page(vcpu->kvm, sp); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1265 | |
Xiao Guangrong | a4a8e6f | 2010-11-19 17:04:03 +0800 | [diff] [blame] | 1266 | if (vcpu->arch.mmu.sync_page(vcpu, sp)) { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1267 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1268 | return 1; |
| 1269 | } |
| 1270 | |
| 1271 | kvm_mmu_flush_tlb(vcpu); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1272 | return 0; |
| 1273 | } |
| 1274 | |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1275 | static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, |
| 1276 | struct kvm_mmu_page *sp) |
| 1277 | { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1278 | LIST_HEAD(invalid_list); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1279 | int ret; |
| 1280 | |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1281 | ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); |
Xiao Guangrong | be71e06 | 2010-06-11 21:31:38 +0800 | [diff] [blame] | 1282 | if (ret) |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1283 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 1284 | |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1285 | return ret; |
| 1286 | } |
| 1287 | |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1288 | static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
| 1289 | struct list_head *invalid_list) |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1290 | { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1291 | return __kvm_sync_page(vcpu, sp, invalid_list, true); |
Xiao Guangrong | 1d9dc7e | 2010-05-15 18:51:24 +0800 | [diff] [blame] | 1292 | } |
| 1293 | |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1294 | /* @gfn should be write-protected at the call site */ |
| 1295 | static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 1296 | { |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1297 | struct kvm_mmu_page *s; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1298 | struct hlist_node *node; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1299 | LIST_HEAD(invalid_list); |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1300 | bool flush = false; |
| 1301 | |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1302 | for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1303 | if (!s->unsync) |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1304 | continue; |
| 1305 | |
| 1306 | WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); |
Xiao Guangrong | a4a8e6f | 2010-11-19 17:04:03 +0800 | [diff] [blame] | 1307 | kvm_unlink_unsync_page(vcpu->kvm, s); |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1308 | if ((s->role.cr4_pae != !!is_pae(vcpu)) || |
Xiao Guangrong | a4a8e6f | 2010-11-19 17:04:03 +0800 | [diff] [blame] | 1309 | (vcpu->arch.mmu.sync_page(vcpu, s))) { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1310 | kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1311 | continue; |
| 1312 | } |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1313 | flush = true; |
| 1314 | } |
| 1315 | |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1316 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1317 | if (flush) |
| 1318 | kvm_mmu_flush_tlb(vcpu); |
| 1319 | } |
| 1320 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1321 | struct mmu_page_path { |
| 1322 | struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; |
| 1323 | unsigned int idx[PT64_ROOT_LEVEL-1]; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1324 | }; |
| 1325 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1326 | #define for_each_sp(pvec, sp, parents, i) \ |
| 1327 | for (i = mmu_pages_next(&pvec, &parents, -1), \ |
| 1328 | sp = pvec.page[i].sp; \ |
| 1329 | i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ |
| 1330 | i = mmu_pages_next(&pvec, &parents, i)) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1331 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 1332 | static int mmu_pages_next(struct kvm_mmu_pages *pvec, |
| 1333 | struct mmu_page_path *parents, |
| 1334 | int i) |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1335 | { |
| 1336 | int n; |
| 1337 | |
| 1338 | for (n = i+1; n < pvec->nr; n++) { |
| 1339 | struct kvm_mmu_page *sp = pvec->page[n].sp; |
| 1340 | |
| 1341 | if (sp->role.level == PT_PAGE_TABLE_LEVEL) { |
| 1342 | parents->idx[0] = pvec->page[n].idx; |
| 1343 | return n; |
| 1344 | } |
| 1345 | |
| 1346 | parents->parent[sp->role.level-2] = sp; |
| 1347 | parents->idx[sp->role.level-1] = pvec->page[n].idx; |
| 1348 | } |
| 1349 | |
| 1350 | return n; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1351 | } |
| 1352 | |
Hannes Eder | cded19f | 2009-02-21 02:19:13 +0100 | [diff] [blame] | 1353 | static void mmu_pages_clear_parents(struct mmu_page_path *parents) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1354 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1355 | struct kvm_mmu_page *sp; |
| 1356 | unsigned int level = 0; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1357 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1358 | do { |
| 1359 | unsigned int idx = parents->idx[level]; |
| 1360 | |
| 1361 | sp = parents->parent[level]; |
| 1362 | if (!sp) |
| 1363 | return; |
| 1364 | |
| 1365 | --sp->unsync_children; |
| 1366 | WARN_ON((int)sp->unsync_children < 0); |
| 1367 | __clear_bit(idx, sp->unsync_child_bitmap); |
| 1368 | level++; |
| 1369 | } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); |
| 1370 | } |
| 1371 | |
| 1372 | static void kvm_mmu_pages_init(struct kvm_mmu_page *parent, |
| 1373 | struct mmu_page_path *parents, |
| 1374 | struct kvm_mmu_pages *pvec) |
| 1375 | { |
| 1376 | parents->parent[parent->role.level-1] = NULL; |
| 1377 | pvec->nr = 0; |
| 1378 | } |
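| | /*
| |  * Hedged sketch of the traversal state: mmu_pages_next() records, for
| |  * each pvec entry, the chain of intermediate pages (parents->parent[])
| |  * and the child index taken at each level (parents->idx[]), so that
| |  * mmu_pages_clear_parents() can later walk back up, clearing
| |  * unsync_child_bitmap bits and decrementing unsync_children once a
| |  * subtree has been fully processed.
| |  */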
| 1379 | |
| 1380 | static void mmu_sync_children(struct kvm_vcpu *vcpu, |
| 1381 | struct kvm_mmu_page *parent) |
| 1382 | { |
| 1383 | int i; |
| 1384 | struct kvm_mmu_page *sp; |
| 1385 | struct mmu_page_path parents; |
| 1386 | struct kvm_mmu_pages pages; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1387 | LIST_HEAD(invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1388 | |
| 1389 | kvm_mmu_pages_init(parent, &parents, &pages); |
| 1390 | while (mmu_unsync_walk(parent, &pages)) { |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1391 | int protected = 0; |
| 1392 | |
| 1393 | for_each_sp(pages, sp, parents, i) |
| 1394 | protected |= rmap_write_protect(vcpu->kvm, sp->gfn); |
| 1395 | |
| 1396 | if (protected) |
| 1397 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1398 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1399 | for_each_sp(pages, sp, parents, i) { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1400 | kvm_sync_page(vcpu, sp, &invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1401 | mmu_pages_clear_parents(&parents); |
| 1402 | } |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1403 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1404 | cond_resched_lock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1405 | kvm_mmu_pages_init(parent, &parents, &pages); |
| 1406 | } |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1407 | } |
| 1408 | |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1409 | static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, |
| 1410 | gfn_t gfn, |
| 1411 | gva_t gaddr, |
| 1412 | unsigned level, |
Avi Kivity | f6e2c02 | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 1413 | int direct, |
Avi Kivity | 41074d0 | 2007-12-09 17:00:02 +0200 | [diff] [blame] | 1414 | unsigned access, |
Avi Kivity | f7d9c7b | 2008-02-26 22:12:10 +0200 | [diff] [blame] | 1415 | u64 *parent_pte) |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1416 | { |
| 1417 | union kvm_mmu_page_role role; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1418 | unsigned quadrant; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1419 | struct kvm_mmu_page *sp; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1420 | struct hlist_node *node; |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1421 | bool need_sync = false; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1422 | |
Avi Kivity | a770f6f | 2008-12-21 19:20:09 +0200 | [diff] [blame] | 1423 | role = vcpu->arch.mmu.base_role; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1424 | role.level = level; |
Avi Kivity | f6e2c02 | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 1425 | role.direct = direct; |
Avi Kivity | 84b0c8c | 2010-03-14 10:16:40 +0200 | [diff] [blame] | 1426 | if (role.direct) |
Avi Kivity | 5b7e010 | 2010-04-14 19:20:03 +0300 | [diff] [blame] | 1427 | role.cr4_pae = 0; |
Avi Kivity | 41074d0 | 2007-12-09 17:00:02 +0200 | [diff] [blame] | 1428 | role.access = access; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 1429 | if (!vcpu->arch.mmu.direct_map |
| 1430 | && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1431 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); |
| 1432 | quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; |
| 1433 | role.quadrant = quadrant; |
| 1434 | } |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1435 | for_each_gfn_sp(vcpu->kvm, sp, gfn, node) { |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1436 | if (!need_sync && sp->unsync) |
| 1437 | need_sync = true; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1438 | |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1439 | if (sp->role.word != role.word) |
| 1440 | continue; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1441 | |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1442 | if (sp->unsync && kvm_sync_page_transient(vcpu, sp)) |
| 1443 | break; |
Xiao Guangrong | e02aa90 | 2010-05-15 18:52:34 +0800 | [diff] [blame] | 1444 | |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1445 | mmu_page_add_parent_pte(vcpu, sp, parent_pte); |
| 1446 | if (sp->unsync_children) { |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 1447 | kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1448 | kvm_mmu_mark_parents_unsync(sp); |
| 1449 | } else if (sp->unsync) |
| 1450 | kvm_mmu_mark_parents_unsync(sp); |
Xiao Guangrong | e02aa90 | 2010-05-15 18:52:34 +0800 | [diff] [blame] | 1451 | |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1452 | trace_kvm_mmu_get_page(sp, false); |
| 1453 | return sp; |
| 1454 | } |
Avi Kivity | dfc5aa0 | 2007-12-18 19:47:18 +0200 | [diff] [blame] | 1455 | ++vcpu->kvm->stat.mmu_cache_miss; |
Lai Jiangshan | 2032a93 | 2010-05-26 16:49:59 +0800 | [diff] [blame] | 1456 | sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1457 | if (!sp) |
| 1458 | return sp; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1459 | sp->gfn = gfn; |
| 1460 | sp->role = role; |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1461 | hlist_add_head(&sp->hash_link, |
| 1462 | &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); |
Avi Kivity | f6e2c02 | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 1463 | if (!direct) { |
Marcelo Tosatti | b1a3682 | 2008-12-01 22:32:03 -0200 | [diff] [blame] | 1464 | if (rmap_write_protect(vcpu->kvm, gfn)) |
| 1465 | kvm_flush_remote_tlbs(vcpu->kvm); |
Xiao Guangrong | 9f1a122 | 2010-05-24 15:41:33 +0800 | [diff] [blame] | 1466 | if (level > PT_PAGE_TABLE_LEVEL && need_sync) |
| 1467 | kvm_sync_pages(vcpu, gfn); |
| 1468 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1469 | account_shadowed(vcpu->kvm, gfn); |
| 1470 | } |
Avi Kivity | 131d827 | 2008-05-29 14:56:28 +0300 | [diff] [blame] | 1471 | if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte) |
| 1472 | vcpu->arch.mmu.prefetch_page(vcpu, sp); |
| 1473 | else |
| 1474 | nonpaging_prefetch_page(vcpu, sp); |
Avi Kivity | f691fe1 | 2009-07-06 15:58:14 +0300 | [diff] [blame] | 1475 | trace_kvm_mmu_get_page(sp, true); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1476 | return sp; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1477 | } |
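| | /*
| |  * Hedged illustration: several shadow pages can exist for one gfn as
| |  * long as their roles differ, e.g. a non-PAE guest page table shadowed
| |  * under PAE ends up split across quadrants:
| |  *
| |  *   sp A: gfn = G, role.level = 1, role.quadrant = 0
| |  *   sp B: gfn = G, role.level = 1, role.quadrant = 1
| |  *
| |  * which is why the lookup loop above matches on the full role.word
| |  * rather than on gfn alone.
| |  */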
| 1478 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 1479 | static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, |
| 1480 | struct kvm_vcpu *vcpu, u64 addr) |
| 1481 | { |
| 1482 | iterator->addr = addr; |
| 1483 | iterator->shadow_addr = vcpu->arch.mmu.root_hpa; |
| 1484 | iterator->level = vcpu->arch.mmu.shadow_root_level; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 1485 | |
| 1486 | if (iterator->level == PT64_ROOT_LEVEL && |
| 1487 | vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && |
| 1488 | !vcpu->arch.mmu.direct_map) |
| 1489 | --iterator->level; |
| 1490 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 1491 | if (iterator->level == PT32E_ROOT_LEVEL) { |
| 1492 | iterator->shadow_addr |
| 1493 | = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; |
| 1494 | iterator->shadow_addr &= PT64_BASE_ADDR_MASK; |
| 1495 | --iterator->level; |
| 1496 | if (!iterator->shadow_addr) |
| 1497 | iterator->level = 0; |
| 1498 | } |
| 1499 | } |
| 1500 | |
| 1501 | static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) |
| 1502 | { |
| 1503 | if (iterator->level < PT_PAGE_TABLE_LEVEL) |
| 1504 | return false; |
Marcelo Tosatti | 4d88954 | 2009-06-11 12:07:41 -0300 | [diff] [blame] | 1505 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 1506 | iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); |
| 1507 | iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; |
| 1508 | return true; |
| 1509 | } |
| 1510 | |
| 1511 | static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) |
| 1512 | { |
Xiao Guangrong | 052331b | 2011-07-12 03:21:17 +0800 | [diff] [blame] | 1513 | if (is_last_spte(*iterator->sptep, iterator->level)) { |
| 1514 | iterator->level = 0; |
| 1515 | return; |
| 1516 | } |
| 1517 | |
Avi Kivity | 2d11123 | 2008-12-25 14:39:47 +0200 | [diff] [blame] | 1518 | iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK; |
| 1519 | --iterator->level; |
| 1520 | } |
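| | /*
| |  * Hedged usage sketch of the iterator above; the for_each_shadow_entry()
| |  * wrapper assumed here is defined elsewhere in this file:
| |  *
| |  *   struct kvm_shadow_walk_iterator it;
| |  *
| |  *   for_each_shadow_entry(vcpu, addr, it) {
| |  *           if (it.level == target_level)
| |  *                   break;
| |  *           ... otherwise descend, installing a missing intermediate
| |  *           page with kvm_mmu_get_page() and link_shadow_page() ...
| |  *   }
| |  *
| |  * target_level is a placeholder for the level at which the fault
| |  * handler wants to install a leaf spte.
| |  */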
| 1521 | |
Avi Kivity | 32ef26a | 2010-07-13 14:27:04 +0300 | [diff] [blame] | 1522 | static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) |
| 1523 | { |
| 1524 | u64 spte; |
| 1525 | |
| 1526 | spte = __pa(sp->spt) |
| 1527 | | PT_PRESENT_MASK | PT_ACCESSED_MASK |
| 1528 | | PT_WRITABLE_MASK | PT_USER_MASK; |
Avi Kivity | 121eee9 | 2010-07-13 14:27:05 +0300 | [diff] [blame] | 1529 | __set_spte(sptep, spte); |
Avi Kivity | 32ef26a | 2010-07-13 14:27:04 +0300 | [diff] [blame] | 1530 | } |
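| | /*
| |  * Design note: non-leaf sptes are always created present, accessed,
| |  * writable and user-accessible. On x86 the effective permission is
| |  * roughly the AND across all paging levels, so restrictions are enforced
| |  * at the leaf and intermediate entries can stay maximally permissive.
| |  */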
| 1531 | |
Avi Kivity | a3aa51c | 2010-07-13 14:27:06 +0300 | [diff] [blame] | 1532 | static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) |
| 1533 | { |
| 1534 | if (is_large_pte(*sptep)) { |
| 1535 | drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); |
| 1536 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1537 | } |
| 1538 | } |
| 1539 | |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 1540 | static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 1541 | unsigned direct_access) |
| 1542 | { |
| 1543 | if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { |
| 1544 | struct kvm_mmu_page *child; |
| 1545 | |
| 1546 | /* |
| 1547 | * For the direct sp, if the guest pte's dirty bit |
| 1548 | * changed from clean to dirty, it will corrupt the
| 1549 | * sp's access: writes would be allowed through a read-only sp,
| 1550 | * so we should update the spte at this point to get |
| 1551 | * a new sp with the correct access. |
| 1552 | */ |
| 1553 | child = page_header(*sptep & PT64_BASE_ADDR_MASK); |
| 1554 | if (child->role.access == direct_access) |
| 1555 | return; |
| 1556 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 1557 | drop_parent_pte(child, sptep); |
Avi Kivity | a357bd2 | 2010-07-13 14:27:07 +0300 | [diff] [blame] | 1558 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1559 | } |
| 1560 | } |
| 1561 | |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 1562 | static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 1563 | u64 *spte) |
| 1564 | { |
| 1565 | u64 pte; |
| 1566 | struct kvm_mmu_page *child; |
| 1567 | |
| 1568 | pte = *spte; |
| 1569 | if (is_shadow_present_pte(pte)) { |
| 1570 | if (is_last_spte(pte, sp->role.level)) |
| 1571 | drop_spte(kvm, spte, shadow_trap_nonpresent_pte); |
| 1572 | else { |
| 1573 | child = page_header(pte & PT64_BASE_ADDR_MASK); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 1574 | drop_parent_pte(child, spte); |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 1575 | } |
| 1576 | } |
| 1577 | __set_spte(spte, shadow_trap_nonpresent_pte); |
| 1578 | if (is_large_pte(pte)) |
| 1579 | --kvm->stat.lpages; |
| 1580 | } |
| 1581 | |
Avi Kivity | 90cb052 | 2007-07-17 13:04:56 +0300 | [diff] [blame] | 1582 | static void kvm_mmu_page_unlink_children(struct kvm *kvm, |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1583 | struct kvm_mmu_page *sp) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1584 | { |
Avi Kivity | 697fe2e | 2007-01-05 16:36:46 -0800 | [diff] [blame] | 1585 | unsigned i; |
Avi Kivity | 697fe2e | 2007-01-05 16:36:46 -0800 | [diff] [blame] | 1586 | |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 1587 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
| 1588 | mmu_page_zap_pte(kvm, sp, sp->spt + i); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1589 | } |
| 1590 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1591 | static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1592 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1593 | mmu_page_remove_parent_pte(sp, parent_pte); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1594 | } |
| 1595 | |
Avi Kivity | 12b7d28 | 2007-09-23 14:10:49 +0200 | [diff] [blame] | 1596 | static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm) |
| 1597 | { |
| 1598 | int i; |
Gleb Natapov | 988a2ca | 2009-06-09 15:56:29 +0300 | [diff] [blame] | 1599 | struct kvm_vcpu *vcpu; |
Avi Kivity | 12b7d28 | 2007-09-23 14:10:49 +0200 | [diff] [blame] | 1600 | |
Gleb Natapov | 988a2ca | 2009-06-09 15:56:29 +0300 | [diff] [blame] | 1601 | kvm_for_each_vcpu(i, vcpu, kvm) |
| 1602 | vcpu->arch.last_pte_updated = NULL; |
Avi Kivity | 12b7d28 | 2007-09-23 14:10:49 +0200 | [diff] [blame] | 1603 | } |
| 1604 | |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 1605 | static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1606 | { |
| 1607 | u64 *parent_pte; |
| 1608 | |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 1609 | while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL))) |
| 1610 | drop_parent_pte(sp, parent_pte); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 1611 | } |
| 1612 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1613 | static int mmu_zap_unsync_children(struct kvm *kvm, |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1614 | struct kvm_mmu_page *parent, |
| 1615 | struct list_head *invalid_list) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1616 | { |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1617 | int i, zapped = 0; |
| 1618 | struct mmu_page_path parents; |
| 1619 | struct kvm_mmu_pages pages; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1620 | |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1621 | if (parent->role.level == PT_PAGE_TABLE_LEVEL) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1622 | return 0; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1623 | |
| 1624 | kvm_mmu_pages_init(parent, &parents, &pages); |
| 1625 | while (mmu_unsync_walk(parent, &pages)) { |
| 1626 | struct kvm_mmu_page *sp; |
| 1627 | |
| 1628 | for_each_sp(pages, sp, parents, i) { |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1629 | kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1630 | mmu_pages_clear_parents(&parents); |
Xiao Guangrong | 77662e0 | 2010-04-16 16:34:42 +0800 | [diff] [blame] | 1631 | zapped++; |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1632 | } |
Marcelo Tosatti | 60c8aec | 2008-12-01 22:32:02 -0200 | [diff] [blame] | 1633 | kvm_mmu_pages_init(parent, &parents, &pages); |
| 1634 | } |
| 1635 | |
| 1636 | return zapped; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1637 | } |
| 1638 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1639 | static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 1640 | struct list_head *invalid_list) |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 1641 | { |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1642 | int ret; |
Avi Kivity | f691fe1 | 2009-07-06 15:58:14 +0300 | [diff] [blame] | 1643 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1644 | trace_kvm_mmu_prepare_zap_page(sp); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 1645 | ++kvm->stat.mmu_shadow_zapped; |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1646 | ret = mmu_zap_unsync_children(kvm, sp, invalid_list); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1647 | kvm_mmu_page_unlink_children(kvm, sp); |
Avi Kivity | 31aa2b4 | 2008-07-11 17:59:46 +0300 | [diff] [blame] | 1648 | kvm_mmu_unlink_parents(kvm, sp); |
Avi Kivity | f6e2c02 | 2009-01-11 13:02:10 +0200 | [diff] [blame] | 1649 | if (!sp->role.invalid && !sp->role.direct) |
Avi Kivity | 5b5c6a5 | 2008-07-11 18:07:26 +0300 | [diff] [blame] | 1650 | unaccount_shadowed(kvm, sp->gfn); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1651 | if (sp->unsync) |
| 1652 | kvm_unlink_unsync_page(kvm, sp); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1653 | if (!sp->root_count) { |
Gui Jianfeng | 54a4f02 | 2010-05-05 09:03:49 +0800 | [diff] [blame] | 1654 | /* Count self */ |
| 1655 | ret++; |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1656 | list_move(&sp->link, invalid_list); |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame^] | 1657 | kvm_mod_used_mmu_pages(kvm, -1); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 1658 | } else { |
Avi Kivity | 5b5c6a5 | 2008-07-11 18:07:26 +0300 | [diff] [blame] | 1659 | list_move(&sp->link, &kvm->arch.active_mmu_pages); |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 1660 | kvm_reload_remote_mmus(kvm); |
| 1661 | } |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1662 | |
| 1663 | sp->role.invalid = 1; |
Avi Kivity | 12b7d28 | 2007-09-23 14:10:49 +0200 | [diff] [blame] | 1664 | kvm_mmu_reset_last_pte_updated(kvm); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1665 | return ret; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1666 | } |
| 1667 | |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1668 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 1669 | struct list_head *invalid_list) |
| 1670 | { |
| 1671 | struct kvm_mmu_page *sp; |
| 1672 | |
| 1673 | if (list_empty(invalid_list)) |
| 1674 | return; |
| 1675 | |
| 1676 | kvm_flush_remote_tlbs(kvm); |
| 1677 | |
| 1678 | do { |
| 1679 | sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); |
| 1680 | WARN_ON(!sp->role.invalid || sp->root_count); |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame^] | 1681 | kvm_mmu_free_page(sp); |
Xiao Guangrong | 7775834 | 2010-06-04 21:53:54 +0800 | [diff] [blame] | 1682 | } while (!list_empty(invalid_list)); |
| 1683 | |
| 1684 | } |
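| | /*
| |  * Two-phase design note, with a hedged sketch of a typical call site:
| |  *
| |  *   LIST_HEAD(invalid_list);
| |  *
| |  *   spin_lock(&kvm->mmu_lock);
| |  *   kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
| |  *   kvm_mmu_commit_zap_page(kvm, &invalid_list);
| |  *   spin_unlock(&kvm->mmu_lock);
| |  *
| |  * Prepare unlinks the page and sets role.invalid so lookups no longer
| |  * match it; commit then issues one kvm_flush_remote_tlbs() for the
| |  * whole batch before freeing, instead of one flush per page.
| |  */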
| 1685 | |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1686 | /* |
| 1687 | * Changing the number of mmu pages allocated to the vm.
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 1688 | * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1689 | */ |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 1690 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1691 | { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1692 | LIST_HEAD(invalid_list); |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1693 | /* |
| 1694 | * If we set the number of mmu pages to be smaller than the
| 1695 | * number of active pages, we must free some mmu pages before we
| 1696 | * change the value.
| 1697 | */ |
| 1698 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 1699 | if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { |
| 1700 | while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages && |
Xiao Guangrong | 77662e0 | 2010-04-16 16:34:42 +0800 | [diff] [blame] | 1701 | !list_empty(&kvm->arch.active_mmu_pages)) { |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1702 | struct kvm_mmu_page *page; |
| 1703 | |
Zhang Xiantao | f05e70a | 2007-12-14 10:01:48 +0800 | [diff] [blame] | 1704 | page = container_of(kvm->arch.active_mmu_pages.prev, |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1705 | struct kvm_mmu_page, link); |
Xiaotian Feng | 80b63fa | 2010-08-24 10:31:07 +0800 | [diff] [blame] | 1706 | kvm_mmu_prepare_zap_page(kvm, page, &invalid_list); |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1707 | } |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame^] | 1708 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 1709 | goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1710 | } |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1711 | |
Dave Hansen | 49d5ca2 | 2010-08-19 18:11:28 -0700 | [diff] [blame] | 1712 | kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; |
Izik Eidus | 82ce2c9 | 2007-10-02 18:52:55 +0200 | [diff] [blame] | 1713 | } |
| 1714 | |
Anthony Liguori | f67a46f | 2007-10-10 19:25:50 -0500 | [diff] [blame] | 1715 | static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1716 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1717 | struct kvm_mmu_page *sp; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1718 | struct hlist_node *node; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1719 | LIST_HEAD(invalid_list); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1720 | int r; |
| 1721 | |
Xiao Guangrong | 9ad17b1 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 1722 | pgprintk("%s: looking for gfn %llx\n", __func__, gfn); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1723 | r = 0; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1724 | |
| 1725 | for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) { |
Xiao Guangrong | 9ad17b1 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 1726 | pgprintk("%s: gfn %llx role %x\n", __func__, gfn, |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1727 | sp->role.word); |
| 1728 | r = 1; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1729 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1730 | } |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1731 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 1732 | return r; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 1733 | } |
| 1734 | |
Anthony Liguori | f67a46f | 2007-10-10 19:25:50 -0500 | [diff] [blame] | 1735 | static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) |
Avi Kivity | 97a0a01 | 2007-05-31 15:08:29 +0300 | [diff] [blame] | 1736 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1737 | struct kvm_mmu_page *sp; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1738 | struct hlist_node *node; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1739 | LIST_HEAD(invalid_list); |
Avi Kivity | 97a0a01 | 2007-05-31 15:08:29 +0300 | [diff] [blame] | 1740 | |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1741 | for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) { |
Xiao Guangrong | 9ad17b1 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 1742 | pgprintk("%s: zap %llx %x\n", |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1743 | __func__, gfn, sp->role.word); |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1744 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
Avi Kivity | 97a0a01 | 2007-05-31 15:08:29 +0300 | [diff] [blame] | 1745 | } |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 1746 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Avi Kivity | 97a0a01 | 2007-05-31 15:08:29 +0300 | [diff] [blame] | 1747 | } |
| 1748 | |
Avi Kivity | 38c335f | 2007-11-21 14:20:22 +0200 | [diff] [blame] | 1749 | static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1750 | { |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 1751 | int slot = memslot_id(kvm, gfn); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 1752 | struct kvm_mmu_page *sp = page_header(__pa(pte)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1753 | |
Sheng Yang | 291f26b | 2008-10-16 17:30:57 +0800 | [diff] [blame] | 1754 | __set_bit(slot, sp->slot_bitmap); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1755 | } |
| 1756 | |
Marcelo Tosatti | 6844dec | 2008-09-23 13:18:38 -0300 | [diff] [blame] | 1757 | static void mmu_convert_notrap(struct kvm_mmu_page *sp) |
| 1758 | { |
| 1759 | int i; |
| 1760 | u64 *pt = sp->spt; |
| 1761 | |
| 1762 | if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte) |
| 1763 | return; |
| 1764 | |
| 1765 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { |
| 1766 | if (pt[i] == shadow_notrap_nonpresent_pte) |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 1767 | __set_spte(&pt[i], shadow_trap_nonpresent_pte); |
Marcelo Tosatti | 6844dec | 2008-09-23 13:18:38 -0300 | [diff] [blame] | 1768 | } |
| 1769 | } |
| 1770 | |
Sheng Yang | 74be52e | 2008-10-09 16:01:56 +0800 | [diff] [blame] | 1771 | /* |
| 1772 | * The function is based on mtrr_type_lookup() in |
| 1773 | * arch/x86/kernel/cpu/mtrr/generic.c |
| 1774 | */ |
| 1775 | static int get_mtrr_type(struct mtrr_state_type *mtrr_state, |
| 1776 | u64 start, u64 end) |
| 1777 | { |
| 1778 | int i; |
| 1779 | u64 base, mask; |
| 1780 | u8 prev_match, curr_match; |
| 1781 | int num_var_ranges = KVM_NR_VAR_MTRR; |
| 1782 | |
| 1783 | if (!mtrr_state->enabled) |
| 1784 | return 0xFF; |
| 1785 | |
| 1786 | /* Make end inclusive, instead of exclusive */
| 1787 | end--; |
| 1788 | |
| 1789 | /* Look in fixed ranges. Just return the type as per start */ |
| 1790 | if (mtrr_state->have_fixed && (start < 0x100000)) { |
| 1791 | int idx; |
| 1792 | |
| 1793 | if (start < 0x80000) { |
| 1794 | idx = 0; |
| 1795 | idx += (start >> 16); |
| 1796 | return mtrr_state->fixed_ranges[idx]; |
| 1797 | } else if (start < 0xC0000) { |
| 1798 | idx = 1 * 8; |
| 1799 | idx += ((start - 0x80000) >> 14); |
| 1800 | return mtrr_state->fixed_ranges[idx]; |
| 1801 | } else if (start < 0x1000000) { |
| 1802 | idx = 3 * 8; |
| 1803 | idx += ((start - 0xC0000) >> 12); |
| 1804 | return mtrr_state->fixed_ranges[idx]; |
| 1805 | } |
| 1806 | } |
| 1807 | |
| 1808 | /* |
| 1809 | * Look in variable ranges.
| 1810 | * Look for multiple ranges matching this address and pick the type
| 1811 | * as per MTRR precedence.
| 1812 | */ |
| 1813 | if (!(mtrr_state->enabled & 2)) |
| 1814 | return mtrr_state->def_type; |
| 1815 | |
| 1816 | prev_match = 0xFF; |
| 1817 | for (i = 0; i < num_var_ranges; ++i) { |
| 1818 | unsigned short start_state, end_state; |
| 1819 | |
| 1820 | if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11))) |
| 1821 | continue; |
| 1822 | |
| 1823 | base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) + |
| 1824 | (mtrr_state->var_ranges[i].base_lo & PAGE_MASK); |
| 1825 | mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) + |
| 1826 | (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK); |
| 1827 | |
| 1828 | start_state = ((start & mask) == (base & mask)); |
| 1829 | end_state = ((end & mask) == (base & mask)); |
| 1830 | if (start_state != end_state) |
| 1831 | return 0xFE; |
| 1832 | |
| 1833 | if ((start & mask) != (base & mask)) |
| 1834 | continue; |
| 1835 | |
| 1836 | curr_match = mtrr_state->var_ranges[i].base_lo & 0xff; |
| 1837 | if (prev_match == 0xFF) { |
| 1838 | prev_match = curr_match; |
| 1839 | continue; |
| 1840 | } |
| 1841 | |
| 1842 | if (prev_match == MTRR_TYPE_UNCACHABLE || |
| 1843 | curr_match == MTRR_TYPE_UNCACHABLE) |
| 1844 | return MTRR_TYPE_UNCACHABLE; |
| 1845 | |
| 1846 | if ((prev_match == MTRR_TYPE_WRBACK && |
| 1847 | curr_match == MTRR_TYPE_WRTHROUGH) || |
| 1848 | (prev_match == MTRR_TYPE_WRTHROUGH && |
| 1849 | curr_match == MTRR_TYPE_WRBACK)) { |
| 1850 | prev_match = MTRR_TYPE_WRTHROUGH; |
| 1851 | curr_match = MTRR_TYPE_WRTHROUGH; |
| 1852 | } |
| 1853 | |
| 1854 | if (prev_match != curr_match) |
| 1855 | return MTRR_TYPE_UNCACHABLE; |
| 1856 | } |
| 1857 | |
| 1858 | if (prev_match != 0xFF) |
| 1859 | return prev_match; |
| 1860 | |
| 1861 | return mtrr_state->def_type; |
| 1862 | } |
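| | /*
| |  * Worked example for the fixed-range lookup above: start = 0xA0000
| |  * (the legacy VGA window) falls in the 16KB fixed MTRRs, so
| |  * idx = 1 * 8 + ((0xA0000 - 0x80000) >> 14) = 8 + 8 = 16, i.e. the
| |  * first entry of the fix16K_A0000 register group in fixed_ranges[].
| |  */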
| 1863 | |
Sheng Yang | 4b12f0d | 2009-04-27 20:35:42 +0800 | [diff] [blame] | 1864 | u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) |
Sheng Yang | 74be52e | 2008-10-09 16:01:56 +0800 | [diff] [blame] | 1865 | { |
| 1866 | u8 mtrr; |
| 1867 | |
| 1868 | mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, |
| 1869 | (gfn << PAGE_SHIFT) + PAGE_SIZE); |
| 1870 | if (mtrr == 0xfe || mtrr == 0xff) |
| 1871 | mtrr = MTRR_TYPE_WRBACK; |
| 1872 | return mtrr; |
| 1873 | } |
Sheng Yang | 4b12f0d | 2009-04-27 20:35:42 +0800 | [diff] [blame] | 1874 | EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type); |
Sheng Yang | 74be52e | 2008-10-09 16:01:56 +0800 | [diff] [blame] | 1875 | |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1876 | static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1877 | { |
Xiao Guangrong | 5e1b3dd | 2010-04-28 11:55:06 +0800 | [diff] [blame] | 1878 | trace_kvm_mmu_unsync_page(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1879 | ++vcpu->kvm->stat.mmu_unsync; |
| 1880 | sp->unsync = 1; |
Marcelo Tosatti | 6cffe8c | 2008-12-01 22:32:04 -0200 | [diff] [blame] | 1881 | |
Xiao Guangrong | 6b18493 | 2010-04-16 21:29:17 +0800 | [diff] [blame] | 1882 | kvm_mmu_mark_parents_unsync(sp); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1883 | mmu_convert_notrap(sp); |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1884 | } |
| 1885 | |
| 1886 | static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 1887 | { |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1888 | struct kvm_mmu_page *s; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1889 | struct hlist_node *node; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1890 | |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1891 | for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { |
Xiao Guangrong | 7ae680e | 2010-06-04 21:53:07 +0800 | [diff] [blame] | 1892 | if (s->unsync) |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1893 | continue; |
| 1894 | WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); |
| 1895 | __kvm_unsync_page(vcpu, s); |
| 1896 | } |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1897 | } |
| 1898 | |
| 1899 | static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 1900 | bool can_unsync) |
| 1901 | { |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1902 | struct kvm_mmu_page *s; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1903 | struct hlist_node *node; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1904 | bool need_unsync = false; |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1905 | |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 1906 | for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { |
Xiao Guangrong | 36a2e67 | 2010-06-30 16:02:02 +0800 | [diff] [blame] | 1907 | if (!can_unsync) |
| 1908 | return 1; |
| 1909 | |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1910 | if (s->role.level != PT_PAGE_TABLE_LEVEL) |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1911 | return 1; |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1912 | |
| 1913 | if (!need_unsync && !s->unsync) { |
Xiao Guangrong | 36a2e67 | 2010-06-30 16:02:02 +0800 | [diff] [blame] | 1914 | if (!oos_shadow) |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1915 | return 1; |
| 1916 | need_unsync = true; |
| 1917 | } |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1918 | } |
Xiao Guangrong | 9cf5cf5 | 2010-05-24 15:40:07 +0800 | [diff] [blame] | 1919 | if (need_unsync) |
| 1920 | kvm_unsync_pages(vcpu, gfn); |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1921 | return 0; |
| 1922 | } |
| 1923 | |
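/*
 * Editorial note: a nonzero return from set_spte() below means the
 * mapping could not be made writable, either because a huge mapping
 * would cover a write-protected page or because the gfn itself needs
 * write protection; mmu_set_spte() reacts by flushing the TLB and, on
 * a write fault, asking the caller to emulate the access instead.
 */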
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 1924 | static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 1925 | unsigned pte_access, int user_fault, |
Xiao Guangrong | 640d9b0 | 2011-07-12 03:24:39 +0800 | [diff] [blame] | 1926 | int write_fault, int level, |
Marcelo Tosatti | c2d0ee4 | 2009-04-05 14:54:47 -0300 | [diff] [blame] | 1927 | gfn_t gfn, pfn_t pfn, bool speculative, |
Lai Jiangshan | 9bdbba1 | 2010-11-19 17:03:22 +0800 | [diff] [blame] | 1928 | bool can_unsync, bool host_writable) |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 1929 | { |
Xiao Guangrong | b330aa0 | 2010-11-19 17:02:35 +0800 | [diff] [blame] | 1930 | u64 spte, entry = *sptep; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 1931 | int ret = 0; |
Sheng Yang | 64d4d52 | 2008-10-09 16:01:57 +0800 | [diff] [blame] | 1932 | |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 1933 | /* |
| 1934 | * We don't set the accessed bit, since we sometimes want to see |
| 1935 | * whether the guest actually used the pte (in order to detect |
| 1936 | * demand paging). |
| 1937 | */ |
Marcelo Tosatti | 982c256 | 2010-10-22 14:18:16 -0200 | [diff] [blame] | 1938 | spte = PT_PRESENT_MASK; |
Avi Kivity | 947da53 | 2008-03-18 11:05:52 +0200 | [diff] [blame] | 1939 | if (!speculative) |
Avi Kivity | 3201b5d | 2008-08-27 20:01:04 +0300 | [diff] [blame] | 1940 | spte |= shadow_accessed_mask; |
Xiao Guangrong | 640d9b0 | 2011-07-12 03:24:39 +0800 | [diff] [blame] | 1941 | |
Sheng Yang | 7b52345 | 2008-04-25 21:13:50 +0800 | [diff] [blame] | 1942 | if (pte_access & ACC_EXEC_MASK) |
| 1943 | spte |= shadow_x_mask; |
| 1944 | else |
| 1945 | spte |= shadow_nx_mask; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 1946 | if (pte_access & ACC_USER_MASK) |
Sheng Yang | 7b52345 | 2008-04-25 21:13:50 +0800 | [diff] [blame] | 1947 | spte |= shadow_user_mask; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 1948 | if (level > PT_PAGE_TABLE_LEVEL) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 1949 | spte |= PT_PAGE_SIZE_MASK; |
Avi Kivity | b0bc3ee | 2010-09-13 16:45:28 +0200 | [diff] [blame] | 1950 | if (tdp_enabled) |
Sheng Yang | 4b12f0d | 2009-04-27 20:35:42 +0800 | [diff] [blame] | 1951 | spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, |
| 1952 | kvm_is_mmio_pfn(pfn)); |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 1953 | |
Lai Jiangshan | 9bdbba1 | 2010-11-19 17:03:22 +0800 | [diff] [blame] | 1954 | if (host_writable) |
Izik Eidus | 1403283 | 2009-09-23 21:47:17 +0300 | [diff] [blame] | 1955 | spte |= SPTE_HOST_WRITEABLE; |
Xiao Guangrong | f8e453b | 2010-12-23 16:09:29 +0800 | [diff] [blame] | 1956 | else |
| 1957 | pte_access &= ~ACC_WRITE_MASK; |
Izik Eidus | 1403283 | 2009-09-23 21:47:17 +0300 | [diff] [blame] | 1958 | |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 1959 | spte |= (u64)pfn << PAGE_SHIFT; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 1960 | |
| 1961 | if ((pte_access & ACC_WRITE_MASK) |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 1962 | || (!vcpu->arch.mmu.direct_map && write_fault |
| 1963 | && !is_write_protection(vcpu) && !user_fault)) { |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 1964 | |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 1965 | if (level > PT_PAGE_TABLE_LEVEL && |
| 1966 | has_wrprotected_page(vcpu->kvm, gfn, level)) { |
Marcelo Tosatti | 38187c8 | 2008-09-23 13:18:32 -0300 | [diff] [blame] | 1967 | ret = 1; |
Avi Kivity | be38d27 | 2010-06-06 14:31:27 +0300 | [diff] [blame] | 1968 | drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); |
| 1969 | goto done; |
Marcelo Tosatti | 38187c8 | 2008-09-23 13:18:32 -0300 | [diff] [blame] | 1970 | } |
| 1971 | |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 1972 | spte |= PT_WRITABLE_MASK; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 1973 | |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 1974 | if (!vcpu->arch.mmu.direct_map |
Avi Kivity | 411c588 | 2011-06-06 16:11:54 +0300 | [diff] [blame] | 1975 | && !(pte_access & ACC_WRITE_MASK)) { |
Avi Kivity | 69325a1 | 2010-05-27 14:35:58 +0300 | [diff] [blame] | 1976 | spte &= ~PT_USER_MASK; |
Avi Kivity | 411c588 | 2011-06-06 16:11:54 +0300 | [diff] [blame] | 1977 | /* |
| 1978 | * If we converted a user page to a kernel page so
| 1979 | * that the kernel can write to it when cr0.wp=0, we
| 1980 | * should prevent the kernel from executing it if
| 1981 | * SMEP is enabled.
| 1982 | */ |
| 1983 | if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) |
| 1984 | spte |= PT64_NX_MASK; |
| 1985 | } |
Avi Kivity | 69325a1 | 2010-05-27 14:35:58 +0300 | [diff] [blame] | 1986 | |
Marcelo Tosatti | ecc5589 | 2008-11-25 15:58:07 +0100 | [diff] [blame] | 1987 | /* |
| 1988 | * Optimization: for pte sync, if spte was writable the hash |
| 1989 | * lookup is unnecessary (and expensive). Write protection |
| 1990 | * is the responsibility of mmu_get_page / kvm_sync_page.
| 1991 | * The same reasoning applies to dirty page accounting.
| 1992 | */ |
Takuya Yoshikawa | 8dae444 | 2010-01-18 18:45:10 +0900 | [diff] [blame] | 1993 | if (!can_unsync && is_writable_pte(*sptep)) |
Marcelo Tosatti | ecc5589 | 2008-11-25 15:58:07 +0100 | [diff] [blame] | 1994 | goto set_pte; |
| 1995 | |
Marcelo Tosatti | 4731d4c | 2008-09-23 13:18:39 -0300 | [diff] [blame] | 1996 | if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { |
Xiao Guangrong | 9ad17b1 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 1997 | pgprintk("%s: found shadow page for %llx, marking ro\n", |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 1998 | __func__, gfn); |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 1999 | ret = 1; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2000 | pte_access &= ~ACC_WRITE_MASK; |
Takuya Yoshikawa | 8dae444 | 2010-01-18 18:45:10 +0900 | [diff] [blame] | 2001 | if (is_writable_pte(spte)) |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2002 | spte &= ~PT_WRITABLE_MASK; |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2003 | } |
| 2004 | } |
| 2005 | |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2006 | if (pte_access & ACC_WRITE_MASK) |
| 2007 | mark_page_dirty(vcpu->kvm, gfn); |
| 2008 | |
Marcelo Tosatti | 38187c8 | 2008-09-23 13:18:32 -0300 | [diff] [blame] | 2009 | set_pte: |
Avi Kivity | b79b93f | 2010-06-06 15:46:44 +0300 | [diff] [blame] | 2010 | update_spte(sptep, spte); |
Xiao Guangrong | b330aa0 | 2010-11-19 17:02:35 +0800 | [diff] [blame] | 2011 | /* |
| 2012 | * If we overwrite a writable spte with a read-only one we |
| 2013 | * should flush remote TLBs. Otherwise rmap_write_protect |
| 2014 | * will find a read-only spte, even though the writable spte |
| 2015 | * might be cached on a CPU's TLB. |
| 2016 | */ |
| 2017 | if (is_writable_pte(entry) && !is_writable_pte(*sptep)) |
| 2018 | kvm_flush_remote_tlbs(vcpu->kvm); |
Avi Kivity | be38d27 | 2010-06-06 14:31:27 +0300 | [diff] [blame] | 2019 | done: |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2020 | return ret; |
| 2021 | } |
| 2022 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2023 | static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2024 | unsigned pt_access, unsigned pte_access, |
Xiao Guangrong | 640d9b0 | 2011-07-12 03:24:39 +0800 | [diff] [blame] | 2025 | int user_fault, int write_fault, |
Xiao Guangrong | b90a0e6 | 2011-07-12 03:25:56 +0800 | [diff] [blame] | 2026 | int *emulate, int level, gfn_t gfn, |
Izik Eidus | 1403283 | 2009-09-23 21:47:17 +0300 | [diff] [blame] | 2027 | pfn_t pfn, bool speculative, |
Lai Jiangshan | 9bdbba1 | 2010-11-19 17:03:22 +0800 | [diff] [blame] | 2028 | bool host_writable) |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2029 | { |
| 2030 | int was_rmapped = 0; |
Marcelo Tosatti | 53a27b3 | 2009-08-05 15:43:58 -0300 | [diff] [blame] | 2031 | int rmap_count; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2032 | |
| 2033 | pgprintk("%s: spte %llx access %x write_fault %d" |
Xiao Guangrong | 9ad17b1 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2034 | " user_fault %d gfn %llx\n", |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2035 | __func__, *sptep, pt_access, |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2036 | write_fault, user_fault, gfn); |
| 2037 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2038 | if (is_rmap_spte(*sptep)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2039 | /* |
| 2040 | * If we overwrite a PTE page pointer with a 2MB PMD, unlink |
| 2041 | * the parent of the now unreachable PTE. |
| 2042 | */ |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2043 | if (level > PT_PAGE_TABLE_LEVEL && |
| 2044 | !is_large_pte(*sptep)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2045 | struct kvm_mmu_page *child; |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2046 | u64 pte = *sptep; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2047 | |
| 2048 | child = page_header(pte & PT64_BASE_ADDR_MASK); |
Xiao Guangrong | bcdd9a9 | 2011-05-15 23:28:29 +0800 | [diff] [blame] | 2049 | drop_parent_pte(child, sptep); |
Marcelo Tosatti | 3be2264 | 2010-05-28 09:44:59 -0300 | [diff] [blame] | 2050 | kvm_flush_remote_tlbs(vcpu->kvm); |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2051 | } else if (pfn != spte_to_pfn(*sptep)) { |
Xiao Guangrong | 9ad17b1 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2052 | pgprintk("hfn old %llx new %llx\n", |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2053 | spte_to_pfn(*sptep), pfn); |
Avi Kivity | be38d27 | 2010-06-06 14:31:27 +0300 | [diff] [blame] | 2054 | drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); |
Xiao Guangrong | 9154635 | 2010-06-30 16:04:06 +0800 | [diff] [blame] | 2055 | kvm_flush_remote_tlbs(vcpu->kvm); |
Joerg Roedel | 6bed6b9 | 2009-02-18 14:08:59 +0100 | [diff] [blame] | 2056 | } else |
| 2057 | was_rmapped = 1; |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2058 | } |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2059 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2060 | if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, |
Xiao Guangrong | 640d9b0 | 2011-07-12 03:24:39 +0800 | [diff] [blame] | 2061 | level, gfn, pfn, speculative, true, |
Lai Jiangshan | 9bdbba1 | 2010-11-19 17:03:22 +0800 | [diff] [blame] | 2062 | host_writable)) { |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2063 | if (write_fault) |
Xiao Guangrong | b90a0e6 | 2011-07-12 03:25:56 +0800 | [diff] [blame] | 2064 | *emulate = 1; |
Xiao Guangrong | 5304efd | 2010-06-08 20:05:57 +0800 | [diff] [blame] | 2065 | kvm_mmu_flush_tlb(vcpu); |
Marcelo Tosatti | a378b4e | 2008-09-23 13:18:31 -0300 | [diff] [blame] | 2066 | } |
Marcelo Tosatti | 1e73f9d | 2008-09-23 13:18:30 -0300 | [diff] [blame] | 2067 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2068 | pgprintk("%s: setting spte %llx\n", __func__, *sptep); |
Xiao Guangrong | 9ad17b1 | 2010-08-28 19:19:42 +0800 | [diff] [blame] | 2069 | pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2070 | is_large_pte(*sptep) ? "2MB" : "4kB",
Joerg Roedel | a205bc1 | 2009-07-09 16:36:01 +0200 | [diff] [blame] | 2071 | *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
| 2072 | *sptep, sptep); |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2073 | if (!was_rmapped && is_large_pte(*sptep)) |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 2074 | ++vcpu->kvm->stat.lpages; |
| 2075 | |
Xiao Guangrong | ffb61bb | 2011-07-12 03:22:01 +0800 | [diff] [blame] | 2076 | if (is_shadow_present_pte(*sptep)) { |
| 2077 | page_header_update_slot(vcpu->kvm, sptep, gfn); |
| 2078 | if (!was_rmapped) { |
| 2079 | rmap_count = rmap_add(vcpu, sptep, gfn); |
| 2080 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
| 2081 | rmap_recycle(vcpu, sptep, gfn); |
| 2082 | } |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2083 | } |
Xiao Guangrong | 9ed5520 | 2010-07-16 11:25:17 +0800 | [diff] [blame] | 2084 | kvm_release_pfn_clean(pfn); |
Avi Kivity | 1b7fcd3 | 2008-05-15 13:51:35 +0300 | [diff] [blame] | 2085 | if (speculative) { |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2086 | vcpu->arch.last_pte_updated = sptep; |
Avi Kivity | 1b7fcd3 | 2008-05-15 13:51:35 +0300 | [diff] [blame] | 2087 | vcpu->arch.last_pte_gfn = gfn; |
| 2088 | } |
Avi Kivity | 1c4f1fd | 2007-12-09 17:40:31 +0200 | [diff] [blame] | 2089 | } |
| 2090 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2091 | static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) |
| 2092 | { |
| 2093 | } |
| 2094 | |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2095 | static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2096 | bool no_dirty_log) |
| 2097 | { |
| 2098 | struct kvm_memory_slot *slot; |
| 2099 | unsigned long hva; |
| 2100 | |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 2101 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2102 | if (!slot) { |
| 2103 | get_page(bad_page); |
| 2104 | return page_to_pfn(bad_page); |
| 2105 | } |
| 2106 | |
| 2107 | hva = gfn_to_hva_memslot(slot, gfn); |
| 2108 | |
| 2109 | return hva_to_pfn_atomic(vcpu->kvm, hva); |
| 2110 | } |
| 2111 | |
| 2112 | static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, |
| 2113 | struct kvm_mmu_page *sp, |
| 2114 | u64 *start, u64 *end) |
| 2115 | { |
| 2116 | struct page *pages[PTE_PREFETCH_NUM]; |
| 2117 | unsigned access = sp->role.access; |
| 2118 | int i, ret; |
| 2119 | gfn_t gfn; |
| 2120 | |
| 2121 | gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); |
Xiao Guangrong | 5d163b1 | 2011-03-09 15:43:00 +0800 | [diff] [blame] | 2122 | if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK)) |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2123 | return -1; |
| 2124 | |
| 2125 | ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start); |
| 2126 | if (ret <= 0) |
| 2127 | return -1; |
| 2128 | |
| 2129 | for (i = 0; i < ret; i++, gfn++, start++) |
| 2130 | mmu_set_spte(vcpu, start, ACC_ALL, |
Xiao Guangrong | 640d9b0 | 2011-07-12 03:24:39 +0800 | [diff] [blame] | 2131 | access, 0, 0, NULL, |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2132 | sp->role.level, gfn, |
| 2133 | page_to_pfn(pages[i]), true, true); |
| 2134 | |
| 2135 | return 0; |
| 2136 | } |
| 2137 | |
| 2138 | static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, |
| 2139 | struct kvm_mmu_page *sp, u64 *sptep) |
| 2140 | { |
| 2141 | u64 *spte, *start = NULL; |
| 2142 | int i; |
| 2143 | |
| 2144 | WARN_ON(!sp->role.direct); |
| 2145 | |
| 2146 | i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); |
| 2147 | spte = sp->spt + i; |
| 2148 | |
| 2149 | for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { |
| 2150 | if (*spte != shadow_trap_nonpresent_pte || spte == sptep) { |
| 2151 | if (!start) |
| 2152 | continue; |
| 2153 | if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) |
| 2154 | break; |
| 2155 | start = NULL; |
| 2156 | } else if (!start) |
| 2157 | start = spte; |
| 2158 | } |
| 2159 | } |
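/*
 * Editorial example: the loop above scans the naturally aligned window
 * of PTE_PREFETCH_NUM sptes containing the faulting spte -- assuming
 * PTE_PREFETCH_NUM is 8, as defined earlier in this file, a fault on
 * spte index 11 scans indices 8..15 -- and hands each maximal run of
 * not-yet-present sptes to direct_pte_prefetch_many() as a
 * [start, end) range.
 */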
| 2160 | |
| 2161 | static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) |
| 2162 | { |
| 2163 | struct kvm_mmu_page *sp; |
| 2164 | |
| 2165 | /* |
| 2166 | * Since there is no accessed bit on EPT, there is no
| 2167 | * way to distinguish actually accessed translations
| 2168 | * from prefetched ones, so disable pte prefetch if EPT
| 2169 | * is enabled.
| 2170 | */ |
| 2171 | if (!shadow_accessed_mask) |
| 2172 | return; |
| 2173 | |
| 2174 | sp = page_header(__pa(sptep)); |
| 2175 | if (sp->role.level > PT_PAGE_TABLE_LEVEL) |
| 2176 | return; |
| 2177 | |
| 2178 | __direct_pte_prefetch(vcpu, sp, sptep); |
| 2179 | } |
| 2180 | |
Joerg Roedel | 4d9976b | 2008-02-07 13:47:42 +0100 | [diff] [blame] | 2181 | static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, |
Xiao Guangrong | 2ec4739 | 2010-12-07 10:34:42 +0800 | [diff] [blame] | 2182 | int map_writable, int level, gfn_t gfn, pfn_t pfn, |
| 2183 | bool prefault) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2184 | { |
Avi Kivity | 9f652d2 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2185 | struct kvm_shadow_walk_iterator iterator; |
| 2186 | struct kvm_mmu_page *sp; |
Xiao Guangrong | b90a0e6 | 2011-07-12 03:25:56 +0800 | [diff] [blame] | 2187 | int emulate = 0; |
Avi Kivity | 9f652d2 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2188 | gfn_t pseudo_gfn; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2189 | |
Avi Kivity | 9f652d2 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2190 | for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2191 | if (iterator.level == level) { |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2192 | unsigned pte_access = ACC_ALL; |
| 2193 | |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2194 | mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access, |
Xiao Guangrong | b90a0e6 | 2011-07-12 03:25:56 +0800 | [diff] [blame] | 2195 | 0, write, &emulate, |
Xiao Guangrong | 2ec4739 | 2010-12-07 10:34:42 +0800 | [diff] [blame] | 2196 | level, gfn, pfn, prefault, map_writable); |
Xiao Guangrong | 957ed9e | 2010-08-22 19:12:48 +0800 | [diff] [blame] | 2197 | direct_pte_prefetch(vcpu, iterator.sptep); |
Avi Kivity | 9f652d2 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2198 | ++vcpu->stat.pf_fixed; |
| 2199 | break; |
| 2200 | } |
| 2201 | |
| 2202 | if (*iterator.sptep == shadow_trap_nonpresent_pte) { |
Lai Jiangshan | c9fa0b3 | 2010-05-26 16:48:25 +0800 | [diff] [blame] | 2203 | u64 base_addr = iterator.addr; |
| 2204 | |
| 2205 | base_addr &= PT64_LVL_ADDR_MASK(iterator.level); |
| 2206 | pseudo_gfn = base_addr >> PAGE_SHIFT; |
Avi Kivity | 9f652d2 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2207 | sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, |
| 2208 | iterator.level - 1, |
| 2209 | 1, ACC_ALL, iterator.sptep); |
| 2210 | if (!sp) { |
| 2211 | pgprintk("nonpaging_map: ENOMEM\n"); |
| 2212 | kvm_release_pfn_clean(pfn); |
| 2213 | return -ENOMEM; |
| 2214 | } |
| 2215 | |
Avi Kivity | d555c33 | 2009-06-10 14:24:23 +0300 | [diff] [blame] | 2216 | __set_spte(iterator.sptep, |
| 2217 | __pa(sp->spt) |
| 2218 | | PT_PRESENT_MASK | PT_WRITABLE_MASK |
Xiao Guangrong | 33f91ed | 2010-09-27 18:05:00 +0800 | [diff] [blame] | 2219 | | shadow_user_mask | shadow_x_mask |
| 2220 | | shadow_accessed_mask); |
Avi Kivity | 9f652d2 | 2008-12-25 14:54:25 +0200 | [diff] [blame] | 2221 | } |
| 2222 | } |
Xiao Guangrong | b90a0e6 | 2011-07-12 03:25:56 +0800 | [diff] [blame] | 2223 | return emulate; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2224 | } |
| 2225 | |
Huang Ying | 77db5cb | 2010-10-08 16:24:15 +0800 | [diff] [blame] | 2226 | static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2227 | { |
Huang Ying | 77db5cb | 2010-10-08 16:24:15 +0800 | [diff] [blame] | 2228 | siginfo_t info; |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2229 | |
Huang Ying | 77db5cb | 2010-10-08 16:24:15 +0800 | [diff] [blame] | 2230 | info.si_signo = SIGBUS; |
| 2231 | info.si_errno = 0; |
| 2232 | info.si_code = BUS_MCEERR_AR; |
| 2233 | info.si_addr = (void __user *)address; |
| 2234 | info.si_addr_lsb = PAGE_SHIFT; |
| 2235 | |
| 2236 | send_sig_info(SIGBUS, &info, tsk); |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2237 | } |
| 2238 | |
Xiao Guangrong | bebb106 | 2011-07-12 03:23:20 +0800 | [diff] [blame] | 2239 | static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva, |
| 2240 | unsigned access, gfn_t gfn, pfn_t pfn) |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2241 | { |
| 2242 | kvm_release_pfn_clean(pfn); |
| 2243 | if (is_hwpoison_pfn(pfn)) { |
Xiao Guangrong | bebb106 | 2011-07-12 03:23:20 +0800 | [diff] [blame] | 2244 | kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2245 | return 0; |
Gleb Natapov | edba23e | 2010-07-07 20:16:45 +0300 | [diff] [blame] | 2246 | } else if (is_fault_pfn(pfn)) |
| 2247 | return -EFAULT; |
| 2248 | |
Xiao Guangrong | bebb106 | 2011-07-12 03:23:20 +0800 | [diff] [blame] | 2249 | vcpu_cache_mmio_info(vcpu, gva, gfn, access); |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2250 | return 1; |
| 2251 | } |
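/*
 * Editorial note on the return convention above: 0 means the fault was
 * fully handled (a SIGBUS was delivered for the poisoned page),
 * -EFAULT propagates a hard failure, and 1 tells the caller to treat
 * the access as MMIO once the (gva, gfn) pair has been cached.
 */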
| 2252 | |
Andrea Arcangeli | 936a5fe | 2011-01-13 15:46:48 -0800 | [diff] [blame] | 2253 | static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, |
| 2254 | gfn_t *gfnp, pfn_t *pfnp, int *levelp) |
| 2255 | { |
| 2256 | pfn_t pfn = *pfnp; |
| 2257 | gfn_t gfn = *gfnp; |
| 2258 | int level = *levelp; |
| 2259 | |
| 2260 | /* |
| 2261 | * Check if it's a transparent hugepage. If this were a
| 2262 | * hugetlbfs page, level wouldn't be set to
| 2263 | * PT_PAGE_TABLE_LEVEL and there would be no adjustment done |
| 2264 | * here. |
| 2265 | */ |
| 2266 | if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && |
| 2267 | level == PT_PAGE_TABLE_LEVEL && |
| 2268 | PageTransCompound(pfn_to_page(pfn)) && |
| 2269 | !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { |
| 2270 | unsigned long mask; |
| 2271 | /* |
| 2272 | * mmu_notifier_retry was successful and we hold the |
| 2273 | * mmu_lock here, so the pmd can't start splitting
| 2274 | * under us, and in turn
| 2275 | * __split_huge_page_refcount() can't run under us
| 2276 | * either, so we can safely transfer the refcount from
| 2277 | * PG_tail to PG_head as we switch the pfn from tail
| 2278 | * to head.
| 2279 | */ |
| 2280 | *levelp = level = PT_DIRECTORY_LEVEL; |
| 2281 | mask = KVM_PAGES_PER_HPAGE(level) - 1; |
| 2282 | VM_BUG_ON((gfn & mask) != (pfn & mask)); |
| 2283 | if (pfn & mask) { |
| 2284 | gfn &= ~mask; |
| 2285 | *gfnp = gfn; |
| 2286 | kvm_release_pfn_clean(pfn); |
| 2287 | pfn &= ~mask; |
| 2288 | if (!get_page_unless_zero(pfn_to_page(pfn))) |
| 2289 | BUG(); |
| 2290 | *pfnp = pfn; |
| 2291 | } |
| 2292 | } |
| 2293 | } |
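/*
 * Editorial example for the adjustment above: for a 2MB mapping,
 * mask = KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1 = 511, so a fault
 * on gfn 0x1234 backed by pfn 0x5634 (both 0x34 pages into their huge
 * frames, as the VM_BUG_ON checks) is rewritten to the head pair
 * gfn 0x1200 / pfn 0x5600, with the page reference moved from the tail
 * page to the head page.
 */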
| 2294 | |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2295 | static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, |
Xiao Guangrong | 060c2ab | 2010-11-12 14:49:11 +0800 | [diff] [blame] | 2296 | gva_t gva, pfn_t *pfn, bool write, bool *writable); |
| 2297 | |
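/*
 * Editorial note on the mmu_notifier_seq pattern used by
 * nonpaging_map() and tdp_page_fault(): the sequence count is sampled
 * (with smp_rmb()) before the pfn is resolved outside mmu_lock; if an
 * mmu notifier invalidation ran in the meantime, mmu_notifier_retry()
 * observes a changed count under mmu_lock and the fault bails out
 * instead of installing a stale pfn.
 */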
| 2298 | static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2299 | bool prefault) |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 2300 | { |
| 2301 | int r; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2302 | int level; |
Andrea Arcangeli | 936a5fe | 2011-01-13 15:46:48 -0800 | [diff] [blame] | 2303 | int force_pt_level; |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2304 | pfn_t pfn; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2305 | unsigned long mmu_seq; |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2306 | bool map_writable; |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 2307 | |
Andrea Arcangeli | 936a5fe | 2011-01-13 15:46:48 -0800 | [diff] [blame] | 2308 | force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); |
| 2309 | if (likely(!force_pt_level)) { |
| 2310 | level = mapping_level(vcpu, gfn); |
| 2311 | /* |
| 2312 | * This path builds a PAE pagetable, so we can map
| 2313 | * 2MB pages at most. Therefore check whether the level
| 2314 | * is larger than that.
| 2315 | */ |
| 2316 | if (level > PT_DIRECTORY_LEVEL) |
| 2317 | level = PT_DIRECTORY_LEVEL; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2318 | |
Andrea Arcangeli | 936a5fe | 2011-01-13 15:46:48 -0800 | [diff] [blame] | 2319 | gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); |
| 2320 | } else |
| 2321 | level = PT_PAGE_TABLE_LEVEL; |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 2322 | |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2323 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
Marcelo Tosatti | 4c2155c | 2008-09-16 20:54:47 -0300 | [diff] [blame] | 2324 | smp_rmb(); |
Xiao Guangrong | 060c2ab | 2010-11-12 14:49:11 +0800 | [diff] [blame] | 2325 | |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2326 | if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) |
Xiao Guangrong | 060c2ab | 2010-11-12 14:49:11 +0800 | [diff] [blame] | 2327 | return 0; |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 2328 | |
Avi Kivity | d196e34 | 2008-01-24 11:44:11 +0200 | [diff] [blame] | 2329 | /* mmio */ |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2330 | if (is_error_pfn(pfn)) |
Xiao Guangrong | bebb106 | 2011-07-12 03:23:20 +0800 | [diff] [blame] | 2331 | return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn); |
Avi Kivity | d196e34 | 2008-01-24 11:44:11 +0200 | [diff] [blame] | 2332 | |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 2333 | spin_lock(&vcpu->kvm->mmu_lock); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2334 | if (mmu_notifier_retry(vcpu, mmu_seq)) |
| 2335 | goto out_unlock; |
Avi Kivity | eb787d1 | 2007-12-31 15:27:49 +0200 | [diff] [blame] | 2336 | kvm_mmu_free_some_pages(vcpu); |
Andrea Arcangeli | 936a5fe | 2011-01-13 15:46:48 -0800 | [diff] [blame] | 2337 | if (likely(!force_pt_level)) |
| 2338 | transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); |
Xiao Guangrong | 2ec4739 | 2010-12-07 10:34:42 +0800 | [diff] [blame] | 2339 | r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, |
| 2340 | prefault); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 2341 | spin_unlock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 2343 | |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 2344 | return r; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2345 | |
| 2346 | out_unlock: |
| 2347 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2348 | kvm_release_pfn_clean(pfn); |
| 2349 | return 0; |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 2350 | } |
| 2351 | |
| 2352 | |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2353 | static void mmu_free_roots(struct kvm_vcpu *vcpu) |
| 2354 | { |
| 2355 | int i; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2356 | struct kvm_mmu_page *sp; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2357 | LIST_HEAD(invalid_list); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2358 | |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2359 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) |
Avi Kivity | 7b53aa5 | 2007-06-05 12:17:03 +0300 | [diff] [blame] | 2360 | return; |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 2361 | spin_lock(&vcpu->kvm->mmu_lock); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2362 | if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && |
| 2363 | (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || |
| 2364 | vcpu->arch.mmu.direct_map)) { |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2365 | hpa_t root = vcpu->arch.mmu.root_hpa; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2366 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2367 | sp = page_header(root); |
| 2368 | --sp->root_count; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2369 | if (!sp->root_count && sp->role.invalid) { |
| 2370 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); |
| 2371 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 2372 | } |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2373 | vcpu->arch.mmu.root_hpa = INVALID_PAGE; |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 2374 | spin_unlock(&vcpu->kvm->mmu_lock); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2375 | return; |
| 2376 | } |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2377 | for (i = 0; i < 4; ++i) { |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2378 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2379 | |
Avi Kivity | 417726a | 2007-04-12 17:35:58 +0300 | [diff] [blame] | 2380 | if (root) { |
Avi Kivity | 417726a | 2007-04-12 17:35:58 +0300 | [diff] [blame] | 2381 | root &= PT64_BASE_ADDR_MASK; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2382 | sp = page_header(root); |
| 2383 | --sp->root_count; |
Marcelo Tosatti | 2e53d63 | 2008-02-20 14:47:24 -0500 | [diff] [blame] | 2384 | if (!sp->root_count && sp->role.invalid) |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2385 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, |
| 2386 | &invalid_list); |
Avi Kivity | 417726a | 2007-04-12 17:35:58 +0300 | [diff] [blame] | 2387 | } |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2388 | vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2389 | } |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 2390 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 2391 | spin_unlock(&vcpu->kvm->mmu_lock); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2392 | vcpu->arch.mmu.root_hpa = INVALID_PAGE; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2393 | } |
| 2394 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 2395 | static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) |
| 2396 | { |
| 2397 | int ret = 0; |
| 2398 | |
| 2399 | if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 2400 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 2401 | ret = 1; |
| 2402 | } |
| 2403 | |
| 2404 | return ret; |
| 2405 | } |
| 2406 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2407 | static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) |
| 2408 | { |
| 2409 | struct kvm_mmu_page *sp; |
Avi Kivity | 7ebaf15 | 2010-10-03 18:51:39 +0200 | [diff] [blame] | 2410 | unsigned i; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2411 | |
| 2412 | if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { |
| 2413 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2414 | kvm_mmu_free_some_pages(vcpu); |
| 2415 | sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, |
| 2416 | 1, ACC_ALL, NULL); |
| 2417 | ++sp->root_count; |
| 2418 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2419 | vcpu->arch.mmu.root_hpa = __pa(sp->spt); |
| 2420 | } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { |
| 2421 | for (i = 0; i < 4; ++i) { |
| 2422 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
| 2423 | |
| 2424 | ASSERT(!VALID_PAGE(root)); |
| 2425 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2426 | kvm_mmu_free_some_pages(vcpu); |
Avi Kivity | 649497d | 2010-12-28 12:09:07 +0200 | [diff] [blame] | 2427 | sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), |
| 2428 | i << 30, |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2429 | PT32_ROOT_LEVEL, 1, ACC_ALL, |
| 2430 | NULL); |
| 2431 | root = __pa(sp->spt); |
| 2432 | ++sp->root_count; |
| 2433 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2434 | vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2435 | } |
Xiao Guangrong | 6292757 | 2010-09-27 18:02:12 +0800 | [diff] [blame] | 2436 | vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2437 | } else |
| 2438 | BUG(); |
| 2439 | |
| 2440 | return 0; |
| 2441 | } |
| 2442 | |
| 2443 | static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2444 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2445 | struct kvm_mmu_page *sp; |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2446 | u64 pdptr, pm_mask; |
| 2447 | gfn_t root_gfn; |
| 2448 | int i; |
Avi Kivity | 3bb65a2 | 2007-01-05 16:36:51 -0800 | [diff] [blame] | 2449 | |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 2450 | root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2451 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2452 | if (mmu_check_root(vcpu, root_gfn)) |
| 2453 | return 1; |
| 2454 | |
| 2455 | /* |
| 2456 | * Do we shadow a long mode page table? If so, we need to
| 2457 | * write-protect the guest's page table root.
| 2458 | */ |
| 2459 | if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2460 | hpa_t root = vcpu->arch.mmu.root_hpa; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2461 | |
| 2462 | ASSERT(!VALID_PAGE(root)); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2463 | |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 2464 | spin_lock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 24955b6 | 2010-05-12 21:00:35 -0300 | [diff] [blame] | 2465 | kvm_mmu_free_some_pages(vcpu); |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2466 | sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, |
| 2467 | 0, ACC_ALL, NULL); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2468 | root = __pa(sp->spt); |
| 2469 | ++sp->root_count; |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 2470 | spin_unlock(&vcpu->kvm->mmu_lock); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2471 | vcpu->arch.mmu.root_hpa = root; |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 2472 | return 0; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2473 | } |
Joerg Roedel | f87f928 | 2010-09-02 17:29:45 +0200 | [diff] [blame] | 2474 | |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2475 | /* |
| 2476 | * We shadow a 32-bit page table. This may be a legacy 2-level
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2477 | * or a PAE 3-level page table. In either case we need to be aware that |
| 2478 | * the shadow page table may be a PAE or a long mode page table. |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2479 | */ |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2480 | pm_mask = PT_PRESENT_MASK; |
| 2481 | if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) |
| 2482 | pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; |
| 2483 | |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2484 | for (i = 0; i < 4; ++i) { |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2485 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2486 | |
| 2487 | ASSERT(!VALID_PAGE(root)); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2488 | if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { |
Joerg Roedel | d41d189 | 2010-09-10 17:30:58 +0200 | [diff] [blame] | 2489 | pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i); |
Avi Kivity | 43a3795 | 2009-06-10 14:12:05 +0300 | [diff] [blame] | 2490 | if (!is_present_gpte(pdptr)) { |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2491 | vcpu->arch.mmu.pae_root[i] = 0; |
Avi Kivity | 417726a | 2007-04-12 17:35:58 +0300 | [diff] [blame] | 2492 | continue; |
| 2493 | } |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 2494 | root_gfn = pdptr >> PAGE_SHIFT; |
Joerg Roedel | f87f928 | 2010-09-02 17:29:45 +0200 | [diff] [blame] | 2495 | if (mmu_check_root(vcpu, root_gfn)) |
| 2496 | return 1; |
Eric Northup | 5a7388c | 2010-04-26 17:00:05 -0700 | [diff] [blame] | 2497 | } |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 2498 | spin_lock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 24955b6 | 2010-05-12 21:00:35 -0300 | [diff] [blame] | 2499 | kvm_mmu_free_some_pages(vcpu); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2500 | sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, |
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2501 | PT32_ROOT_LEVEL, 0, |
Avi Kivity | f7d9c7b | 2008-02-26 22:12:10 +0200 | [diff] [blame] | 2502 | ACC_ALL, NULL); |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 2503 | root = __pa(sp->spt); |
| 2504 | ++sp->root_count; |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 2505 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2506 | |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2507 | vcpu->arch.mmu.pae_root[i] = root | pm_mask; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2508 | } |
Xiao Guangrong | 6292757 | 2010-09-27 18:02:12 +0800 | [diff] [blame] | 2509 | vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2510 | |
| 2511 | /* |
| 2512 | * If we shadow a 32-bit page table with a long mode page
| 2513 | * table, we enter this path.
| 2514 | */ |
| 2515 | if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { |
| 2516 | if (vcpu->arch.mmu.lm_root == NULL) { |
| 2517 | /* |
| 2518 | * The additional page necessary for this is only |
| 2519 | * allocated on demand. |
| 2520 | */ |
| 2521 | |
| 2522 | u64 *lm_root; |
| 2523 | |
| 2524 | lm_root = (void*)get_zeroed_page(GFP_KERNEL); |
| 2525 | if (lm_root == NULL) |
| 2526 | return 1; |
| 2527 | |
| 2528 | lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; |
| 2529 | |
| 2530 | vcpu->arch.mmu.lm_root = lm_root; |
| 2531 | } |
| 2532 | |
| 2533 | vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); |
| 2534 | } |
| 2535 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 2536 | return 0; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2537 | } |
| 2538 | |
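/*
 * Editorial sketch of the hierarchy built by mmu_alloc_shadow_roots()
 * above when a 32-bit guest table is shadowed with a long mode page
 * table (the lm_root path):
 *
 *	root_hpa -> lm_root[0] -> pae_root[0..3] -> PT32 shadow roots
 *
 * Only lm_root[0] is populated, and the pae_root entries carry pm_mask
 * so that they are valid long mode entries.
 */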
Joerg Roedel | 651dd37 | 2010-09-10 17:30:59 +0200 | [diff] [blame] | 2539 | static int mmu_alloc_roots(struct kvm_vcpu *vcpu) |
| 2540 | { |
| 2541 | if (vcpu->arch.mmu.direct_map) |
| 2542 | return mmu_alloc_direct_roots(vcpu); |
| 2543 | else |
| 2544 | return mmu_alloc_shadow_roots(vcpu); |
| 2545 | } |
| 2546 | |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 2547 | static void mmu_sync_roots(struct kvm_vcpu *vcpu) |
| 2548 | { |
| 2549 | int i; |
| 2550 | struct kvm_mmu_page *sp; |
| 2551 | |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2552 | if (vcpu->arch.mmu.direct_map) |
| 2553 | return; |
| 2554 | |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 2555 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) |
| 2556 | return; |
Xiao Guangrong | 6903074 | 2010-09-27 18:09:29 +0800 | [diff] [blame] | 2557 | |
Xiao Guangrong | bebb106 | 2011-07-12 03:23:20 +0800 | [diff] [blame] | 2558 | vcpu_clear_mmio_info(vcpu, ~0ul); |
Xiao Guangrong | 6903074 | 2010-09-27 18:09:29 +0800 | [diff] [blame] | 2559 | trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 2560 | if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 2561 | hpa_t root = vcpu->arch.mmu.root_hpa; |
| 2562 | sp = page_header(root); |
| 2563 | mmu_sync_children(vcpu, sp); |
Xiao Guangrong | 5054c0d | 2010-11-12 14:46:08 +0800 | [diff] [blame] | 2564 | trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 2565 | return; |
| 2566 | } |
| 2567 | for (i = 0; i < 4; ++i) { |
| 2568 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
| 2569 | |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 2570 | if (root && VALID_PAGE(root)) { |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 2571 | root &= PT64_BASE_ADDR_MASK; |
| 2572 | sp = page_header(root); |
| 2573 | mmu_sync_children(vcpu, sp); |
| 2574 | } |
| 2575 | } |
Xiao Guangrong | 6903074 | 2010-09-27 18:09:29 +0800 | [diff] [blame] | 2576 | trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 2577 | } |
| 2578 | |
| 2579 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) |
| 2580 | { |
| 2581 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2582 | mmu_sync_roots(vcpu); |
| 2583 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2584 | } |
| 2585 | |
Gleb Natapov | 1871c60 | 2010-02-10 14:21:32 +0200 | [diff] [blame] | 2586 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 2587 | u32 access, struct x86_exception *exception) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2588 | { |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 2589 | if (exception) |
| 2590 | exception->error_code = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2591 | return vaddr; |
| 2592 | } |
| 2593 | |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 2594 | static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 2595 | u32 access, |
| 2596 | struct x86_exception *exception) |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 2597 | { |
Avi Kivity | ab9ae31 | 2010-11-22 17:53:26 +0200 | [diff] [blame] | 2598 | if (exception) |
| 2599 | exception->error_code = 0; |
Joerg Roedel | 6539e73 | 2010-09-10 17:30:50 +0200 | [diff] [blame] | 2600 | return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access); |
| 2601 | } |
| 2602 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2603 | static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2604 | u32 error_code, bool prefault) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2605 | { |
Avi Kivity | e833240 | 2007-12-09 18:43:00 +0200 | [diff] [blame] | 2606 | gfn_t gfn; |
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 2607 | int r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2608 | |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 2609 | pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); |
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 2610 | r = mmu_topup_memory_caches(vcpu); |
| 2611 | if (r) |
| 2612 | return r; |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 2613 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2614 | ASSERT(vcpu); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2615 | ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2616 | |
Avi Kivity | e833240 | 2007-12-09 18:43:00 +0200 | [diff] [blame] | 2617 | gfn = gva >> PAGE_SHIFT; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2618 | |
Avi Kivity | e833240 | 2007-12-09 18:43:00 +0200 | [diff] [blame] | 2619 | return nonpaging_map(vcpu, gva & PAGE_MASK, |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2620 | error_code & PFERR_WRITE_MASK, gfn, prefault); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2621 | } |
| 2622 | |
Jan Kiszka | 7e1fbea | 2010-10-20 15:18:02 +0200 | [diff] [blame] | 2623 | static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2624 | { |
| 2625 | struct kvm_arch_async_pf arch; |
Xiao Guangrong | fb67e14 | 2010-12-07 10:35:25 +0800 | [diff] [blame] | 2626 | |
Gleb Natapov | 7c90705 | 2010-10-14 11:22:53 +0200 | [diff] [blame] | 2627 | arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2628 | arch.gfn = gfn; |
Xiao Guangrong | c4806ac | 2010-11-12 14:49:55 +0800 | [diff] [blame] | 2629 | arch.direct_map = vcpu->arch.mmu.direct_map; |
Xiao Guangrong | fb67e14 | 2010-12-07 10:35:25 +0800 | [diff] [blame] | 2630 | arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2631 | |
| 2632 | return kvm_setup_async_pf(vcpu, gva, gfn, &arch); |
| 2633 | } |
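/*
 * Editorial note: the token built above packs a per-vcpu sequence
 * number into the bits above 12 and the vcpu id into the low bits; for
 * example, apf.id == 2 on vcpu 1 yields (2 << 12) | 1 = 0x2001, which
 * lets a completion event be matched back to the vcpu that queued it.
 */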
| 2634 | |
| 2635 | static bool can_do_async_pf(struct kvm_vcpu *vcpu) |
| 2636 | { |
| 2637 | if (unlikely(!irqchip_in_kernel(vcpu->kvm) || |
| 2638 | kvm_event_needs_reinjection(vcpu))) |
| 2639 | return false; |
| 2640 | |
| 2641 | return kvm_x86_ops->interrupt_allowed(vcpu); |
| 2642 | } |
| 2643 | |
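/*
 * Editorial note: try_async_pf() below returns true when the fault has
 * been handed off -- either an async page fault was queued, or the
 * vcpu was told to halt because the same gfn already has one pending --
 * so the caller should return 0 without mapping anything. It returns
 * false once *pfn holds a usable result, from either the non-blocking
 * lookup or the blocking gfn_to_pfn_prot() fallback.
 */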
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2644 | static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2645 | gva_t gva, pfn_t *pfn, bool write, bool *writable) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2646 | { |
| 2647 | bool async; |
| 2648 | |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2649 | *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2650 | |
| 2651 | if (!async) |
| 2652 | return false; /* *pfn already holds the correct page */
| 2653 | |
| 2654 | put_page(pfn_to_page(*pfn)); |
| 2655 | |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2656 | if (!prefault && can_do_async_pf(vcpu)) { |
Xiao Guangrong | c9b263d | 2010-11-01 16:58:43 +0800 | [diff] [blame] | 2657 | trace_kvm_try_async_get_page(gva, gfn); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2658 | if (kvm_find_async_pf_gfn(vcpu, gfn)) { |
| 2659 | trace_kvm_async_pf_doublefault(gva, gfn); |
| 2660 | kvm_make_request(KVM_REQ_APF_HALT, vcpu); |
| 2661 | return true; |
| 2662 | } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) |
| 2663 | return true; |
| 2664 | } |
| 2665 | |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2666 | *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2667 | |
| 2668 | return false; |
| 2669 | } |
| 2670 | |
Gleb Natapov | 56028d0 | 2010-10-17 18:13:42 +0200 | [diff] [blame] | 2671 | static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2672 | bool prefault) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2673 | { |
Anthony Liguori | 35149e2 | 2008-04-02 14:46:56 -0500 | [diff] [blame] | 2674 | pfn_t pfn; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2675 | int r; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2676 | int level; |
Andrea Arcangeli | 936a5fe | 2011-01-13 15:46:48 -0800 | [diff] [blame] | 2677 | int force_pt_level; |
Marcelo Tosatti | 05da455 | 2008-02-23 11:44:30 -0300 | [diff] [blame] | 2678 | gfn_t gfn = gpa >> PAGE_SHIFT; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2679 | unsigned long mmu_seq; |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2680 | int write = error_code & PFERR_WRITE_MASK; |
| 2681 | bool map_writable; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2682 | |
| 2683 | ASSERT(vcpu); |
| 2684 | ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
| 2685 | |
| 2686 | r = mmu_topup_memory_caches(vcpu); |
| 2687 | if (r) |
| 2688 | return r; |
| 2689 | |
Andrea Arcangeli | 936a5fe | 2011-01-13 15:46:48 -0800 | [diff] [blame] | 2690 | force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); |
| 2691 | if (likely(!force_pt_level)) { |
| 2692 | level = mapping_level(vcpu, gfn); |
| 2693 | gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); |
| 2694 | } else |
| 2695 | level = PT_PAGE_TABLE_LEVEL; |
Joerg Roedel | 852e3c1 | 2009-07-27 16:30:44 +0200 | [diff] [blame] | 2696 | |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2697 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
Marcelo Tosatti | 4c2155c | 2008-09-16 20:54:47 -0300 | [diff] [blame] | 2698 | smp_rmb(); |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2699 | |
Xiao Guangrong | 78b2c54 | 2010-12-07 10:48:06 +0800 | [diff] [blame] | 2700 | if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) |
Gleb Natapov | af585b9 | 2010-10-14 11:22:46 +0200 | [diff] [blame] | 2701 | return 0; |
| 2702 | |
| 2703 | /* mmio */ |
Huang Ying | bf99815 | 2010-05-31 14:28:19 +0800 | [diff] [blame] | 2704 | if (is_error_pfn(pfn)) |
Xiao Guangrong | bebb106 | 2011-07-12 03:23:20 +0800 | [diff] [blame] | 2705 | return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2706 | spin_lock(&vcpu->kvm->mmu_lock); |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2707 | if (mmu_notifier_retry(vcpu, mmu_seq)) |
| 2708 | goto out_unlock; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2709 | kvm_mmu_free_some_pages(vcpu); |
Andrea Arcangeli | 936a5fe | 2011-01-13 15:46:48 -0800 | [diff] [blame] | 2710 | if (likely(!force_pt_level)) |
| 2711 | transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); |
Marcelo Tosatti | 612819c | 2010-10-22 14:18:18 -0200 | [diff] [blame] | 2712 | r = __direct_map(vcpu, gpa, write, map_writable, |
Xiao Guangrong | 2ec4739 | 2010-12-07 10:34:42 +0800 | [diff] [blame] | 2713 | level, gfn, pfn, prefault); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2714 | spin_unlock(&vcpu->kvm->mmu_lock); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2715 | |
| 2716 | return r; |
Andrea Arcangeli | e930bff | 2008-07-25 16:24:52 +0200 | [diff] [blame] | 2717 | |
| 2718 | out_unlock: |
| 2719 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2720 | kvm_release_pfn_clean(pfn); |
| 2721 | return 0; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2722 | } |
| 2723 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2724 | static void nonpaging_free(struct kvm_vcpu *vcpu) |
| 2725 | { |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2726 | mmu_free_roots(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2727 | } |
| 2728 | |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2729 | static int nonpaging_init_context(struct kvm_vcpu *vcpu, |
| 2730 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2731 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2732 | context->new_cr3 = nonpaging_new_cr3; |
| 2733 | context->page_fault = nonpaging_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2734 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
| 2735 | context->free = nonpaging_free; |
Avi Kivity | c7addb9 | 2007-09-16 18:58:32 +0200 | [diff] [blame] | 2736 | context->prefetch_page = nonpaging_prefetch_page; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2737 | context->sync_page = nonpaging_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2738 | context->invlpg = nonpaging_invlpg; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 2739 | context->update_pte = nonpaging_update_pte; |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2740 | context->root_level = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2741 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 2742 | context->root_hpa = INVALID_PAGE; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 2743 | context->direct_map = true; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2744 | context->nx = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2745 | return 0; |
| 2746 | } |
| 2747 | |
Avi Kivity | d835dfe | 2007-11-21 02:57:59 +0200 | [diff] [blame] | 2748 | void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2749 | { |
Avi Kivity | 1165f5f | 2007-04-19 17:27:43 +0300 | [diff] [blame] | 2750 | ++vcpu->stat.tlb_flush; |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 2751 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2752 | } |
| 2753 | |
| 2754 | static void paging_new_cr3(struct kvm_vcpu *vcpu) |
| 2755 | { |
Avi Kivity | 9f8fe50 | 2010-12-05 17:30:00 +0200 | [diff] [blame] | 2756 | pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu)); |
Avi Kivity | cea0f0e | 2007-01-05 16:36:43 -0800 | [diff] [blame] | 2757 | mmu_free_roots(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2758 | } |
| 2759 | |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 2760 | static unsigned long get_cr3(struct kvm_vcpu *vcpu) |
| 2761 | { |
Avi Kivity | 9f8fe50 | 2010-12-05 17:30:00 +0200 | [diff] [blame] | 2762 | return kvm_read_cr3(vcpu); |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 2763 | } |
| 2764 | |
Avi Kivity | 6389ee9 | 2010-11-29 16:12:30 +0200 | [diff] [blame] | 2765 | static void inject_page_fault(struct kvm_vcpu *vcpu, |
| 2766 | struct x86_exception *fault) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2767 | { |
Avi Kivity | 6389ee9 | 2010-11-29 16:12:30 +0200 | [diff] [blame] | 2768 | vcpu->arch.mmu.inject_page_fault(vcpu, fault); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2769 | } |
| 2770 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2771 | static void paging_free(struct kvm_vcpu *vcpu) |
| 2772 | { |
| 2773 | nonpaging_free(vcpu); |
| 2774 | } |
| 2775 | |
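/*
 * Check a guest pte against the reserved-bit masks computed in
 * reset_rsvds_bits_mask().  Bit 7 of the pte is the PS (page size) bit
 * in directory-level entries, so it selects between the 4K masks
 * (rsvd_bits_mask[0][]) and the large-page masks (rsvd_bits_mask[1][]);
 * in a 4K pte bit 7 is the PAT bit, so both masks are set equal there.
 */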
Joerg Roedel | 3241f22 | 2010-09-10 17:30:45 +0200 | [diff] [blame] | 2776 | static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2777 | { |
| 2778 | int bit7; |
| 2779 | |
| 2780 | bit7 = (gpte >> 7) & 1; |
Joerg Roedel | 3241f22 | 2010-09-10 17:30:45 +0200 | [diff] [blame] | 2781 | return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2782 | } |
| 2783 | |
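/*
 * paging_tmpl.h is a template: including it once with PTTYPE 64 and once
 * with PTTYPE 32 stamps out two copies of the guest page-table walker and
 * fault handlers (paging64_* and paging32_*), one per guest pte format.
 */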
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2784 | #define PTTYPE 64 |
| 2785 | #include "paging_tmpl.h" |
| 2786 | #undef PTTYPE |
| 2787 | |
| 2788 | #define PTTYPE 32 |
| 2789 | #include "paging_tmpl.h" |
| 2790 | #undef PTTYPE |
| 2791 | |
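/*
 * rsvd_bits(s, e) builds a mask with bits s..e set.  For example, on a
 * CPU with MAXPHYADDR == 36, rsvd_bits(36, 51) catches physical-address
 * bits beyond the supported width, and rsvd_bits(13, 20) catches the low
 * frame bits that must be zero in a 2MB large-page PDE.  A guest pte with
 * any of these bits set is reported as a reserved-bit page fault.
 */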
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2792 | static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
| 2793 | struct kvm_mmu *context, |
| 2794 | int level) |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2795 | { |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2796 | int maxphyaddr = cpuid_maxphyaddr(vcpu); |
| 2797 | u64 exb_bit_rsvd = 0; |
| 2798 | |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2799 | if (!context->nx) |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2800 | exb_bit_rsvd = rsvd_bits(63, 63); |
| 2801 | switch (level) { |
| 2802 | case PT32_ROOT_LEVEL: |
| 2803 | /* no rsvd bits for 2-level 4K page table entries */ |
| 2804 | context->rsvd_bits_mask[0][1] = 0; |
| 2805 | context->rsvd_bits_mask[0][0] = 0; |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 2806 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; |
| 2807 | |
| 2808 | if (!is_pse(vcpu)) { |
| 2809 | context->rsvd_bits_mask[1][1] = 0; |
| 2810 | break; |
| 2811 | } |
| 2812 | |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2813 | if (is_cpuid_PSE36()) |
| 2814 | /* 36-bit PSE 4MB page */ |
| 2815 | context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); |
| 2816 | else |
| 2817 | /* 32-bit PSE 4MB page */ |
| 2818 | context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2819 | break; |
| 2820 | case PT32E_ROOT_LEVEL: |
Dong, Eddie | 20c466b | 2009-03-31 23:03:45 +0800 | [diff] [blame] | 2821 | context->rsvd_bits_mask[0][2] = |
| 2822 | rsvd_bits(maxphyaddr, 63) | |
| 2823 | rsvd_bits(7, 8) | rsvd_bits(1, 2); /* PDPTE */ |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2824 | context->rsvd_bits_mask[0][1] = exb_bit_rsvd | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 2825 | rsvd_bits(maxphyaddr, 62); /* PDE */ |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2826 | context->rsvd_bits_mask[0][0] = exb_bit_rsvd | |
| 2827 | rsvd_bits(maxphyaddr, 62); /* PTE */ |
| 2828 | context->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
| 2829 | rsvd_bits(maxphyaddr, 62) | |
| 2830 | rsvd_bits(13, 20); /* large page */ |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 2831 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2832 | break; |
| 2833 | case PT64_ROOT_LEVEL: |
| 2834 | context->rsvd_bits_mask[0][3] = exb_bit_rsvd | |
| 2835 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8); |
| 2836 | context->rsvd_bits_mask[0][2] = exb_bit_rsvd | |
| 2837 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8); |
| 2838 | context->rsvd_bits_mask[0][1] = exb_bit_rsvd | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 2839 | rsvd_bits(maxphyaddr, 51); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2840 | context->rsvd_bits_mask[0][0] = exb_bit_rsvd | |
| 2841 | rsvd_bits(maxphyaddr, 51); |
| 2842 | context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; |
Joerg Roedel | e04da98 | 2009-07-27 16:30:45 +0200 | [diff] [blame] | 2843 | context->rsvd_bits_mask[1][2] = exb_bit_rsvd | |
| 2844 | rsvd_bits(maxphyaddr, 51) | |
| 2845 | rsvd_bits(13, 29); |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2846 | context->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
Sheng Yang | 4c26b4c | 2009-04-02 10:28:37 +0800 | [diff] [blame] | 2847 | rsvd_bits(maxphyaddr, 51) | |
| 2848 | rsvd_bits(13, 20); /* large page */ |
Xiao Guangrong | f815bce | 2010-03-19 17:58:53 +0800 | [diff] [blame] | 2849 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; |
Dong, Eddie | 82725b2 | 2009-03-30 16:21:08 +0800 | [diff] [blame] | 2850 | break; |
| 2851 | } |
| 2852 | } |
| 2853 | |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2854 | static int paging64_init_context_common(struct kvm_vcpu *vcpu, |
| 2855 | struct kvm_mmu *context, |
| 2856 | int level) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2857 | { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2858 | context->nx = is_nx(vcpu); |
| 2859 | |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2860 | reset_rsvds_bits_mask(vcpu, context, level); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2861 | |
| 2862 | ASSERT(is_pae(vcpu)); |
| 2863 | context->new_cr3 = paging_new_cr3; |
| 2864 | context->page_fault = paging64_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2865 | context->gva_to_gpa = paging64_gva_to_gpa; |
Avi Kivity | c7addb9 | 2007-09-16 18:58:32 +0200 | [diff] [blame] | 2866 | context->prefetch_page = paging64_prefetch_page; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2867 | context->sync_page = paging64_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2868 | context->invlpg = paging64_invlpg; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 2869 | context->update_pte = paging64_update_pte; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2870 | context->free = paging_free; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2871 | context->root_level = level; |
| 2872 | context->shadow_root_level = level; |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 2873 | context->root_hpa = INVALID_PAGE; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 2874 | context->direct_map = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2875 | return 0; |
| 2876 | } |
| 2877 | |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2878 | static int paging64_init_context(struct kvm_vcpu *vcpu, |
| 2879 | struct kvm_mmu *context) |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2880 | { |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2881 | return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 2882 | } |
| 2883 | |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2884 | static int paging32_init_context(struct kvm_vcpu *vcpu, |
| 2885 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2886 | { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2887 | context->nx = false; |
| 2888 | |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2889 | reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2890 | |
| 2891 | context->new_cr3 = paging_new_cr3; |
| 2892 | context->page_fault = paging32_page_fault; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2893 | context->gva_to_gpa = paging32_gva_to_gpa; |
| 2894 | context->free = paging_free; |
Avi Kivity | c7addb9 | 2007-09-16 18:58:32 +0200 | [diff] [blame] | 2895 | context->prefetch_page = paging32_prefetch_page; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2896 | context->sync_page = paging32_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2897 | context->invlpg = paging32_invlpg; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 2898 | context->update_pte = paging32_update_pte; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2899 | context->root_level = PT32_ROOT_LEVEL; |
| 2900 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 2901 | context->root_hpa = INVALID_PAGE; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 2902 | context->direct_map = false; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2903 | return 0; |
| 2904 | } |
| 2905 | |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2906 | static int paging32E_init_context(struct kvm_vcpu *vcpu, |
| 2907 | struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2908 | { |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2909 | return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2910 | } |
| 2911 | |
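/*
 * With TDP (EPT/NPT) the hardware itself walks both the guest page tables
 * and the guest-physical to host-physical tables, so the direct-mapped
 * context set up below never shadows guest ptes.  A software gva_to_gpa
 * walker matching the guest's current paging mode is still installed for
 * instruction emulation.
 */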
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2912 | static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) |
| 2913 | { |
Joerg Roedel | 14dfe85 | 2010-09-10 17:30:49 +0200 | [diff] [blame] | 2914 | struct kvm_mmu *context = vcpu->arch.walk_mmu; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2915 | |
Avi Kivity | c445f8e | 2010-12-21 16:26:01 +0200 | [diff] [blame] | 2916 | context->base_role.word = 0; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2917 | context->new_cr3 = nonpaging_new_cr3; |
| 2918 | context->page_fault = tdp_page_fault; |
| 2919 | context->free = nonpaging_free; |
| 2920 | context->prefetch_page = nonpaging_prefetch_page; |
Marcelo Tosatti | e8bc217 | 2008-09-23 13:18:33 -0300 | [diff] [blame] | 2921 | context->sync_page = nonpaging_sync_page; |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2922 | context->invlpg = nonpaging_invlpg; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 2923 | context->update_pte = nonpaging_update_pte; |
Sheng Yang | 67253af | 2008-04-25 10:20:22 +0800 | [diff] [blame] | 2924 | context->shadow_root_level = kvm_x86_ops->get_tdp_level(); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2925 | context->root_hpa = INVALID_PAGE; |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 2926 | context->direct_map = true; |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 2927 | context->set_cr3 = kvm_x86_ops->set_tdp_cr3; |
Joerg Roedel | 5777ed3 | 2010-09-10 17:30:42 +0200 | [diff] [blame] | 2928 | context->get_cr3 = get_cr3; |
Joerg Roedel | cb659db | 2010-09-10 17:30:43 +0200 | [diff] [blame] | 2929 | context->inject_page_fault = kvm_inject_page_fault; |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2930 | context->nx = is_nx(vcpu); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2931 | |
| 2932 | if (!is_paging(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2933 | context->nx = false; |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2934 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
| 2935 | context->root_level = 0; |
| 2936 | } else if (is_long_mode(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2937 | context->nx = is_nx(vcpu); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2938 | reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2939 | context->gva_to_gpa = paging64_gva_to_gpa; |
| 2940 | context->root_level = PT64_ROOT_LEVEL; |
| 2941 | } else if (is_pae(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2942 | context->nx = is_nx(vcpu); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2943 | reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2944 | context->gva_to_gpa = paging64_gva_to_gpa; |
| 2945 | context->root_level = PT32E_ROOT_LEVEL; |
| 2946 | } else { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 2947 | context->nx = false; |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2948 | reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL); |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 2949 | context->gva_to_gpa = paging32_gva_to_gpa; |
| 2950 | context->root_level = PT32_ROOT_LEVEL; |
| 2951 | } |
| 2952 | |
| 2953 | return 0; |
| 2954 | } |
| 2955 | |
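/*
 * Pick a shadow context matching the guest's paging mode: no paging,
 * legacy 32-bit, PAE, or long mode.  The base_role bits filled in below
 * distinguish shadow pages created under different cr0/cr4 settings so
 * they are never shared across modes.
 */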
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2956 | int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2957 | { |
Avi Kivity | a770f6f | 2008-12-21 19:20:09 +0200 | [diff] [blame] | 2958 | int r; |
Avi Kivity | 411c588 | 2011-06-06 16:11:54 +0300 | [diff] [blame] | 2959 | bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2960 | ASSERT(vcpu); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 2961 | ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2962 | |
| 2963 | if (!is_paging(vcpu)) |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2964 | r = nonpaging_init_context(vcpu, context); |
Avi Kivity | a9058ec | 2006-12-29 16:49:37 -0800 | [diff] [blame] | 2965 | else if (is_long_mode(vcpu)) |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2966 | r = paging64_init_context(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2967 | else if (is_pae(vcpu)) |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2968 | r = paging32E_init_context(vcpu, context); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2969 | else |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2970 | r = paging32_init_context(vcpu, context); |
Avi Kivity | a770f6f | 2008-12-21 19:20:09 +0200 | [diff] [blame] | 2971 | |
Avi Kivity | 5b7e010 | 2010-04-14 19:20:03 +0300 | [diff] [blame] | 2972 | vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); |
Joerg Roedel | f43addd | 2010-09-10 17:30:40 +0200 | [diff] [blame] | 2973 | vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); |
Avi Kivity | 411c588 | 2011-06-06 16:11:54 +0300 | [diff] [blame] | 2974 | vcpu->arch.mmu.base_role.smep_andnot_wp |
| 2975 | = smep && !is_write_protection(vcpu); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2976 | |
| 2977 | return r; |
| 2978 | } |
| 2979 | EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); |
| 2980 | |
| 2981 | static int init_kvm_softmmu(struct kvm_vcpu *vcpu) |
| 2982 | { |
Joerg Roedel | 14dfe85 | 2010-09-10 17:30:49 +0200 | [diff] [blame] | 2983 | int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu); |
Joerg Roedel | 52fde8d | 2010-09-10 17:30:44 +0200 | [diff] [blame] | 2984 | |
Joerg Roedel | 14dfe85 | 2010-09-10 17:30:49 +0200 | [diff] [blame] | 2985 | vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3; |
| 2986 | vcpu->arch.walk_mmu->get_cr3 = get_cr3; |
| 2987 | vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; |
Avi Kivity | a770f6f | 2008-12-21 19:20:09 +0200 | [diff] [blame] | 2988 | |
| 2989 | return r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2990 | } |
| 2991 | |
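/*
 * A nested guest's memory access involves two translation stages:
 * l2_gva -> l2_gpa through L2's own page tables, then l2_gpa -> l1_gpa
 * through the tables L1 maintains for L2.  The nested_mmu set up here
 * performs the combined walk on behalf of emulation code.
 */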
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 2992 | static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu) |
| 2993 | { |
| 2994 | struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; |
| 2995 | |
| 2996 | g_context->get_cr3 = get_cr3; |
| 2997 | g_context->inject_page_fault = kvm_inject_page_fault; |
| 2998 | |
| 2999 | /* |
| 3000 | * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa. The |
| 3001 | * combined translation of l2_gva to l1_gpa is done using the |
| 3002 | * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa |
| 3003 | * functions of mmu and nested_mmu are swapped. |
| 3004 | */ |
| 3005 | if (!is_paging(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 3006 | g_context->nx = false; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 3007 | g_context->root_level = 0; |
| 3008 | g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; |
| 3009 | } else if (is_long_mode(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 3010 | g_context->nx = is_nx(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 3011 | reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL); |
| 3012 | g_context->root_level = PT64_ROOT_LEVEL; |
| 3013 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 3014 | } else if (is_pae(vcpu)) { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 3015 | g_context->nx = is_nx(vcpu); |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 3016 | reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL); |
| 3017 | g_context->root_level = PT32E_ROOT_LEVEL; |
| 3018 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 3019 | } else { |
Joerg Roedel | 2d48a98 | 2010-09-10 17:31:01 +0200 | [diff] [blame] | 3020 | g_context->nx = false; |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 3021 | reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL); |
| 3022 | g_context->root_level = PT32_ROOT_LEVEL; |
| 3023 | g_context->gva_to_gpa = paging32_gva_to_gpa_nested; |
| 3024 | } |
| 3025 | |
| 3026 | return 0; |
| 3027 | } |
| 3028 | |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 3029 | static int init_kvm_mmu(struct kvm_vcpu *vcpu) |
| 3030 | { |
Joerg Roedel | 02f59dc | 2010-09-10 17:30:54 +0200 | [diff] [blame] | 3031 | if (mmu_is_nested(vcpu)) |
| 3032 | return init_kvm_nested_mmu(vcpu); |
| 3033 | else if (tdp_enabled) |
Joerg Roedel | fb72d16 | 2008-02-07 13:47:44 +0100 | [diff] [blame] | 3034 | return init_kvm_tdp_mmu(vcpu); |
| 3035 | else |
| 3036 | return init_kvm_softmmu(vcpu); |
| 3037 | } |
| 3038 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3039 | static void destroy_kvm_mmu(struct kvm_vcpu *vcpu) |
| 3040 | { |
| 3041 | ASSERT(vcpu); |
Sheng Yang | 62ad075 | 2010-05-12 16:40:41 +0800 | [diff] [blame] | 3042 | if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) |
| 3043 | /* mmu.free() should set root_hpa = INVALID_PAGE */ |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3044 | vcpu->arch.mmu.free(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3045 | } |
| 3046 | |
| 3047 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) |
| 3048 | { |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 3049 | destroy_kvm_mmu(vcpu); |
Marcelo Tosatti | f8f7e5e | 2011-06-21 14:00:10 -0300 | [diff] [blame] | 3050 | return init_kvm_mmu(vcpu); |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 3051 | } |
Eddie Dong | 8668a3c | 2007-10-10 14:26:45 +0800 | [diff] [blame] | 3052 | EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 3053 | |
| 3054 | int kvm_mmu_load(struct kvm_vcpu *vcpu) |
| 3055 | { |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 3056 | int r; |
| 3057 | |
Avi Kivity | e2dec93 | 2007-01-05 16:36:54 -0800 | [diff] [blame] | 3058 | r = mmu_topup_memory_caches(vcpu); |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 3059 | if (r) |
| 3060 | goto out; |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3061 | r = mmu_alloc_roots(vcpu); |
Avi Kivity | 8facbbf | 2010-05-04 12:58:32 +0300 | [diff] [blame] | 3062 | spin_lock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 0ba73cd | 2008-09-23 13:18:34 -0300 | [diff] [blame] | 3063 | mmu_sync_roots(vcpu); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 3064 | spin_unlock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 8986ecc | 2009-05-12 18:55:45 -0300 | [diff] [blame] | 3065 | if (r) |
| 3066 | goto out; |
Sheng Yang | 3662cb1 | 2009-07-09 17:00:42 +0800 | [diff] [blame] | 3067 | /* set_cr3() should ensure TLB has been flushed */ |
Joerg Roedel | f43addd | 2010-09-10 17:30:40 +0200 | [diff] [blame] | 3068 | vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); |
Avi Kivity | 714b93d | 2007-01-05 16:36:53 -0800 | [diff] [blame] | 3069 | out: |
| 3070 | return r; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3071 | } |
Avi Kivity | 17c3ba9 | 2007-06-04 15:58:30 +0300 | [diff] [blame] | 3072 | EXPORT_SYMBOL_GPL(kvm_mmu_load); |
| 3073 | |
| 3074 | void kvm_mmu_unload(struct kvm_vcpu *vcpu) |
| 3075 | { |
| 3076 | mmu_free_roots(vcpu); |
| 3077 | } |
Joerg Roedel | 4b16184 | 2010-09-10 17:31:03 +0200 | [diff] [blame] | 3078 | EXPORT_SYMBOL_GPL(kvm_mmu_unload); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3079 | |
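/*
 * Called when the guest writes into a shadowed page table.  Only leaf
 * (4K-level) sptes are updated in place; a write to a directory-level
 * entry is just counted as a zap, since the caller already removed the
 * old spte and a fresh one will be built on the next fault.
 */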
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 3080 | static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 3081 | struct kvm_mmu_page *sp, u64 *spte, |
| 3082 | const void *new) |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 3083 | { |
Marcelo Tosatti | 3094538 | 2008-06-11 20:32:40 -0300 | [diff] [blame] | 3084 | if (sp->role.level != PT_PAGE_TABLE_LEVEL) { |
Joerg Roedel | 7e4e405 | 2009-07-27 16:30:46 +0200 | [diff] [blame] | 3085 | ++vcpu->kvm->stat.mmu_pde_zapped; |
| 3086 | return; |
Marcelo Tosatti | 3094538 | 2008-06-11 20:32:40 -0300 | [diff] [blame] | 3087 | } |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 3088 | |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 3089 | ++vcpu->kvm->stat.mmu_pte_updated; |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 3090 | vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); |
Avi Kivity | 0028425 | 2007-05-01 16:53:31 +0300 | [diff] [blame] | 3091 | } |
| 3092 | |
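/*
 * Decide whether other vcpus' TLBs may still hold a stale translation.
 * A remote flush is needed only when a previously present spte loses a
 * permission, changes its frame, or goes away; the NX bit is inverted
 * below so that setting NX counts as removing a permission.
 */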
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 3093 | static bool need_remote_flush(u64 old, u64 new) |
| 3094 | { |
| 3095 | if (!is_shadow_present_pte(old)) |
| 3096 | return false; |
| 3097 | if (!is_shadow_present_pte(new)) |
| 3098 | return true; |
| 3099 | if ((old ^ new) & PT64_BASE_ADDR_MASK) |
| 3100 | return true; |
| 3101 | old ^= PT64_NX_MASK; |
| 3102 | new ^= PT64_NX_MASK; |
| 3103 | return (old & ~new & PT64_PERM_MASK) != 0; |
| 3104 | } |
| 3105 | |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3106 | static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page, |
| 3107 | bool remote_flush, bool local_flush) |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 3108 | { |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3109 | if (zap_page) |
| 3110 | return; |
| 3111 | |
| 3112 | if (remote_flush) |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 3113 | kvm_flush_remote_tlbs(vcpu->kvm); |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3114 | else if (local_flush) |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 3115 | kvm_mmu_flush_tlb(vcpu); |
| 3116 | } |
| 3117 | |
Avi Kivity | 12b7d28 | 2007-09-23 14:10:49 +0200 | [diff] [blame] | 3118 | static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu) |
| 3119 | { |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3120 | u64 *spte = vcpu->arch.last_pte_updated; |
Avi Kivity | 12b7d28 | 2007-09-23 14:10:49 +0200 | [diff] [blame] | 3121 | |
Sheng Yang | 7b52345 | 2008-04-25 21:13:50 +0800 | [diff] [blame] | 3122 | return !!(spte && (*spte & shadow_accessed_mask)); |
Avi Kivity | 12b7d28 | 2007-09-23 14:10:49 +0200 | [diff] [blame] | 3123 | } |
| 3124 | |
Avi Kivity | 1b7fcd3 | 2008-05-15 13:51:35 +0300 | [diff] [blame] | 3125 | static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 3126 | { |
| 3127 | u64 *spte = vcpu->arch.last_pte_updated; |
| 3128 | |
| 3129 | if (spte |
| 3130 | && vcpu->arch.last_pte_gfn == gfn |
| 3131 | && shadow_accessed_mask |
| 3132 | && !(*spte & shadow_accessed_mask) |
| 3133 | && is_shadow_present_pte(*spte)) |
| 3134 | set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte); |
| 3135 | } |
| 3136 | |
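/*
 * Emulated guest writes into shadowed page tables land here.  The
 * last_pt_write_* fields implement a write-flood heuristic: repeated
 * writes to the same gfn without the last mapped pte being accessed
 * suggest the page is no longer used as a page table (or the guest is
 * forking), in which case unshadowing it beats emulating every write.
 */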
Avi Kivity | 09072da | 2007-05-01 14:16:52 +0300 | [diff] [blame] | 3137 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
Marcelo Tosatti | ad218f8 | 2008-12-01 22:32:05 -0200 | [diff] [blame] | 3138 | const u8 *new, int bytes, |
| 3139 | bool guest_initiated) |
Avi Kivity | da4a00f | 2007-01-05 16:36:44 -0800 | [diff] [blame] | 3140 | { |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3141 | gfn_t gfn = gpa >> PAGE_SHIFT; |
Xiao Guangrong | fa1de2b | 2010-07-16 11:19:51 +0800 | [diff] [blame] | 3142 | union kvm_mmu_page_role mask = { .word = 0 }; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3143 | struct kvm_mmu_page *sp; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 3144 | struct hlist_node *node; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3145 | LIST_HEAD(invalid_list); |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 3146 | u64 entry, gentry, *spte; |
| 3147 | unsigned pte_size, page_offset, misaligned, quadrant, offset; |
| 3148 | int level, npte, invlpg_counter, r, flooded = 0; |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3149 | bool remote_flush, local_flush, zap_page; |
| 3150 | |
Xiao Guangrong | 332b207 | 2011-05-15 23:20:27 +0800 | [diff] [blame] | 3151 | /* |
| 3152 | * If we don't have indirect shadow pages, it means no page is |
| 3153 | * write-protected, so we can simply exit. |
| 3154 | */ |
| 3155 | if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) |
| 3156 | return; |
| 3157 | |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3158 | zap_page = remote_flush = local_flush = false; |
Xiao Guangrong | 0f53b5b | 2011-03-09 15:43:51 +0800 | [diff] [blame] | 3159 | offset = offset_in_page(gpa); |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3160 | |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 3161 | pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); |
Avi Kivity | 72016f3 | 2010-03-15 13:59:53 +0200 | [diff] [blame] | 3162 | |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 3163 | invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter); |
| 3164 | |
| 3165 | /* |
| 3166 | * Assume that the pte write is on a page table of the same type |
Xiao Guangrong | 49b26e2 | 2011-03-04 19:00:00 +0800 | [diff] [blame] | 3167 | * as the current vcpu paging mode, since we update the sptes only |
| 3168 | * when they have the same mode. |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 3169 | */ |
| 3170 | if ((is_pae(vcpu) && bytes == 4) || !new) { |
| 3171 | /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ |
| 3172 | if (is_pae(vcpu)) { |
| 3173 | gpa &= ~(gpa_t)7; |
| 3174 | bytes = 8; |
| 3175 | } |
| 3176 | r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8)); |
| 3177 | if (r) |
| 3178 | gentry = 0; |
| 3179 | new = (const u8 *)&gentry; |
| 3180 | } |
| 3181 | |
Avi Kivity | 72016f3 | 2010-03-15 13:59:53 +0200 | [diff] [blame] | 3182 | switch (bytes) { |
| 3183 | case 4: |
| 3184 | gentry = *(const u32 *)new; |
| 3185 | break; |
| 3186 | case 8: |
| 3187 | gentry = *(const u64 *)new; |
| 3188 | break; |
| 3189 | default: |
| 3190 | gentry = 0; |
| 3191 | break; |
| 3192 | } |
| 3193 | |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 3194 | spin_lock(&vcpu->kvm->mmu_lock); |
Avi Kivity | 08e850c | 2010-03-15 13:59:57 +0200 | [diff] [blame] | 3195 | if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) |
| 3196 | gentry = 0; |
Avi Kivity | eb787d1 | 2007-12-31 15:27:49 +0200 | [diff] [blame] | 3197 | kvm_mmu_free_some_pages(vcpu); |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 3198 | ++vcpu->kvm->stat.mmu_pte_write; |
Xiao Guangrong | 8b1fe17 | 2010-08-30 18:22:53 +0800 | [diff] [blame] | 3199 | trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); |
Marcelo Tosatti | ad218f8 | 2008-12-01 22:32:05 -0200 | [diff] [blame] | 3200 | if (guest_initiated) { |
Xiao Guangrong | 1b7fd45 | 2011-03-04 18:58:02 +0800 | [diff] [blame] | 3201 | kvm_mmu_access_page(vcpu, gfn); |
Marcelo Tosatti | ad218f8 | 2008-12-01 22:32:05 -0200 | [diff] [blame] | 3202 | if (gfn == vcpu->arch.last_pt_write_gfn |
| 3203 | && !last_updated_pte_accessed(vcpu)) { |
| 3204 | ++vcpu->arch.last_pt_write_count; |
| 3205 | if (vcpu->arch.last_pt_write_count >= 3) |
| 3206 | flooded = 1; |
| 3207 | } else { |
| 3208 | vcpu->arch.last_pt_write_gfn = gfn; |
| 3209 | vcpu->arch.last_pt_write_count = 1; |
| 3210 | vcpu->arch.last_pte_updated = NULL; |
| 3211 | } |
Avi Kivity | 86a5ba0 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 3212 | } |
Xiao Guangrong | 3246af0 | 2010-04-16 16:35:54 +0800 | [diff] [blame] | 3213 | |
Xiao Guangrong | fa1de2b | 2010-07-16 11:19:51 +0800 | [diff] [blame] | 3214 | mask.cr0_wp = mask.cr4_pae = mask.nxe = 1; |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 3215 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) { |
Avi Kivity | 5b7e010 | 2010-04-14 19:20:03 +0300 | [diff] [blame] | 3216 | pte_size = sp->role.cr4_pae ? 8 : 4; |
Avi Kivity | 0e7bc4b | 2007-01-05 16:36:48 -0800 | [diff] [blame] | 3217 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); |
Avi Kivity | e925c5b | 2007-04-30 14:47:02 +0300 | [diff] [blame] | 3218 | misaligned |= bytes < 4; |
Avi Kivity | 86a5ba0 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 3219 | if (misaligned || flooded) { |
Avi Kivity | 0e7bc4b | 2007-01-05 16:36:48 -0800 | [diff] [blame] | 3220 | /* |
| 3221 | * Misaligned accesses are too much trouble to fix |
| 3222 | * up; also, they usually indicate a page is not used |
| 3223 | * as a page table. |
Avi Kivity | 86a5ba0 | 2007-01-05 16:36:50 -0800 | [diff] [blame] | 3224 | * |
| 3225 | * If we're seeing too many writes to a page, |
| 3226 | * it may no longer be a page table, or we may be |
| 3227 | * forking, in which case it is better to unmap the |
| 3228 | * page. |
Avi Kivity | 0e7bc4b | 2007-01-05 16:36:48 -0800 | [diff] [blame] | 3229 | */ |
| 3230 | pgprintk("misaligned: gpa %llx bytes %d role %x\n", |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3231 | gpa, bytes, sp->role.word); |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3232 | zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, |
Xiao Guangrong | f41d335 | 2010-06-04 21:56:11 +0800 | [diff] [blame] | 3233 | &invalid_list); |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 3234 | ++vcpu->kvm->stat.mmu_flooded; |
Avi Kivity | 0e7bc4b | 2007-01-05 16:36:48 -0800 | [diff] [blame] | 3235 | continue; |
| 3236 | } |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3237 | page_offset = offset; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3238 | level = sp->role.level; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 3239 | npte = 1; |
Avi Kivity | 5b7e010 | 2010-04-14 19:20:03 +0300 | [diff] [blame] | 3240 | if (!sp->role.cr4_pae) { |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 3241 | page_offset <<= 1; /* 32->64 */ |
| 3242 | /* |
| 3243 | * A 32-bit pde maps 4MB while the shadow pdes map |
| 3244 | * only 2MB. So we need to double the offset again |
| 3245 | * and zap two pdes instead of one. |
| 3246 | */ |
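			/*
			 * For example, a write at byte offset 0x100 (pde
			 * index 64) becomes 0x200 after the 32->64 widening
			 * above and 0x400 after the doubling below, so the
			 * two shadow pdes at 0x400 and 0x408 are zapped.
			 */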
| 3247 | if (level == PT32_ROOT_LEVEL) { |
Avi Kivity | 6b8d0f9 | 2007-04-18 11:18:18 +0300 | [diff] [blame] | 3248 | page_offset &= ~7; /* kill rounding error */ |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 3249 | page_offset <<= 1; |
| 3250 | npte = 2; |
| 3251 | } |
Avi Kivity | fce0657 | 2007-05-01 16:44:05 +0300 | [diff] [blame] | 3252 | quadrant = page_offset >> PAGE_SHIFT; |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3253 | page_offset &= ~PAGE_MASK; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3254 | if (quadrant != sp->role.quadrant) |
Avi Kivity | fce0657 | 2007-05-01 16:44:05 +0300 | [diff] [blame] | 3255 | continue; |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3256 | } |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3257 | local_flush = true; |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3258 | spte = &sp->spt[page_offset / sizeof(*spte)]; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 3259 | while (npte--) { |
Avi Kivity | 79539ce | 2007-11-21 02:06:21 +0200 | [diff] [blame] | 3260 | entry = *spte; |
Xiao Guangrong | 38e3b2b | 2011-05-15 23:27:52 +0800 | [diff] [blame] | 3261 | mmu_page_zap_pte(vcpu->kvm, sp, spte); |
Xiao Guangrong | fa1de2b | 2010-07-16 11:19:51 +0800 | [diff] [blame] | 3262 | if (gentry && |
| 3263 | !((sp->role.word ^ vcpu->arch.mmu.base_role.word) |
| 3264 | & mask.word)) |
Xiao Guangrong | 7c56252 | 2011-03-28 10:29:27 +0800 | [diff] [blame] | 3265 | mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3266 | if (!remote_flush && need_remote_flush(entry, *spte)) |
| 3267 | remote_flush = true; |
Avi Kivity | ac1b714 | 2007-03-08 17:13:32 +0200 | [diff] [blame] | 3268 | ++spte; |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3269 | } |
Avi Kivity | 9b7a032 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3270 | } |
Xiao Guangrong | 0671a8e | 2010-06-04 21:56:59 +0800 | [diff] [blame] | 3271 | mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush); |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3272 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
Xiao Guangrong | 8b1fe17 | 2010-08-30 18:22:53 +0800 | [diff] [blame] | 3273 | trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 3274 | spin_unlock(&vcpu->kvm->mmu_lock); |
Avi Kivity | da4a00f | 2007-01-05 16:36:44 -0800 | [diff] [blame] | 3275 | } |
| 3276 | |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3277 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) |
| 3278 | { |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 3279 | gpa_t gpa; |
| 3280 | int r; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3281 | |
Joerg Roedel | c5a78f2b | 2010-09-10 17:30:39 +0200 | [diff] [blame] | 3282 | if (vcpu->arch.mmu.direct_map) |
Avi Kivity | 60f2478 | 2009-08-27 13:37:06 +0300 | [diff] [blame] | 3283 | return 0; |
| 3284 | |
Gleb Natapov | 1871c60 | 2010-02-10 14:21:32 +0200 | [diff] [blame] | 3285 | gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 3286 | |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 3287 | spin_lock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 3288 | r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 3289 | spin_unlock(&vcpu->kvm->mmu_lock); |
Marcelo Tosatti | 10589a4 | 2007-12-20 19:18:22 -0500 | [diff] [blame] | 3290 | return r; |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3291 | } |
Avi Kivity | 577bdc4 | 2008-07-19 08:57:05 +0300 | [diff] [blame] | 3292 | EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); |
Avi Kivity | a436036 | 2007-01-05 16:36:45 -0800 | [diff] [blame] | 3293 | |
Avi Kivity | 22d95b1 | 2007-09-14 20:26:06 +0300 | [diff] [blame] | 3294 | void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) |
Avi Kivity | ebeace8 | 2007-01-05 16:36:47 -0800 | [diff] [blame] | 3295 | { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3296 | LIST_HEAD(invalid_list); |
Xiao Guangrong | 103ad25 | 2010-06-04 21:54:38 +0800 | [diff] [blame] | 3297 | |
Dave Hansen | e0df7b9 | 2010-08-19 18:11:05 -0700 | [diff] [blame] | 3298 | while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES && |
Izik Eidus | 3b80fff | 2009-07-28 15:26:58 -0300 | [diff] [blame] | 3299 | !list_empty(&vcpu->kvm->arch.active_mmu_pages)) { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3300 | struct kvm_mmu_page *sp; |
Avi Kivity | ebeace8 | 2007-01-05 16:36:47 -0800 | [diff] [blame] | 3301 | |
Zhang Xiantao | f05e70a | 2007-12-14 10:01:48 +0800 | [diff] [blame] | 3302 | sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev, |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3303 | struct kvm_mmu_page, link); |
Dave Hansen | e0df7b9 | 2010-08-19 18:11:05 -0700 | [diff] [blame] | 3304 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); |
Avi Kivity | 4cee576 | 2007-11-18 16:37:07 +0200 | [diff] [blame] | 3305 | ++vcpu->kvm->stat.mmu_recycled; |
Avi Kivity | ebeace8 | 2007-01-05 16:36:47 -0800 | [diff] [blame] | 3306 | } |
Xiao Guangrong | aa6bd18 | 2011-07-12 03:26:40 +0800 | [diff] [blame^] | 3307 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
Avi Kivity | ebeace8 | 2007-01-05 16:36:47 -0800 | [diff] [blame] | 3308 | } |
Avi Kivity | ebeace8 | 2007-01-05 16:36:47 -0800 | [diff] [blame] | 3309 | |
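/*
 * Top-level page-fault handler.  Returns 1 if the fault was handled (or
 * successfully emulated) and the guest can resume, 0 if the exit must be
 * completed in userspace, and a negative value on error.
 */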
Andre Przywara | dc25e89 | 2010-12-21 11:12:07 +0100 | [diff] [blame] | 3310 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, |
| 3311 | void *insn, int insn_len) |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 3312 | { |
| 3313 | int r; |
| 3314 | enum emulation_result er; |
| 3315 | |
Gleb Natapov | 56028d0 | 2010-10-17 18:13:42 +0200 | [diff] [blame] | 3316 | r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 3317 | if (r < 0) |
| 3318 | goto out; |
| 3319 | |
| 3320 | if (!r) { |
| 3321 | r = 1; |
| 3322 | goto out; |
| 3323 | } |
| 3324 | |
Avi Kivity | b733bfb | 2007-10-28 18:52:05 +0200 | [diff] [blame] | 3325 | r = mmu_topup_memory_caches(vcpu); |
| 3326 | if (r) |
| 3327 | goto out; |
| 3328 | |
Andre Przywara | dc25e89 | 2010-12-21 11:12:07 +0100 | [diff] [blame] | 3329 | er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len); |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 3330 | |
| 3331 | switch (er) { |
| 3332 | case EMULATE_DONE: |
| 3333 | return 1; |
| 3334 | case EMULATE_DO_MMIO: |
| 3335 | ++vcpu->stat.mmio_exits; |
Gleb Natapov | 6d77dbf | 2010-05-10 11:16:56 +0300 | [diff] [blame] | 3336 | /* fall through */ |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 3337 | case EMULATE_FAIL: |
Avi Kivity | 3f5d18a | 2009-06-11 15:43:28 +0300 | [diff] [blame] | 3338 | return 0; |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 3339 | default: |
| 3340 | BUG(); |
| 3341 | } |
| 3342 | out: |
Avi Kivity | 3067714 | 2007-10-28 18:48:59 +0200 | [diff] [blame] | 3343 | return r; |
| 3344 | } |
| 3345 | EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); |
| 3346 | |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 3347 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) |
| 3348 | { |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 3349 | vcpu->arch.mmu.invlpg(vcpu, gva); |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 3350 | kvm_mmu_flush_tlb(vcpu); |
| 3351 | ++vcpu->stat.invlpg; |
| 3352 | } |
| 3353 | EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); |
| 3354 | |
Joerg Roedel | 1855267 | 2008-02-07 13:47:41 +0100 | [diff] [blame] | 3355 | void kvm_enable_tdp(void) |
| 3356 | { |
| 3357 | tdp_enabled = true; |
| 3358 | } |
| 3359 | EXPORT_SYMBOL_GPL(kvm_enable_tdp); |
| 3360 | |
Joerg Roedel | 5f4cb66 | 2008-07-14 20:36:36 +0200 | [diff] [blame] | 3361 | void kvm_disable_tdp(void) |
| 3362 | { |
| 3363 | tdp_enabled = false; |
| 3364 | } |
| 3365 | EXPORT_SYMBOL_GPL(kvm_disable_tdp); |
| 3366 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3367 | static void free_mmu_pages(struct kvm_vcpu *vcpu) |
| 3368 | { |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3369 | free_page((unsigned long)vcpu->arch.mmu.pae_root); |
Joerg Roedel | 81407ca | 2010-09-10 17:31:00 +0200 | [diff] [blame] | 3370 | if (vcpu->arch.mmu.lm_root != NULL) |
| 3371 | free_page((unsigned long)vcpu->arch.mmu.lm_root); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3372 | } |
| 3373 | |
| 3374 | static int alloc_mmu_pages(struct kvm_vcpu *vcpu) |
| 3375 | { |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3376 | struct page *page; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3377 | int i; |
| 3378 | |
| 3379 | ASSERT(vcpu); |
| 3380 | |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3381 | /* |
| 3382 | * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. |
| 3383 | * Therefore we need to allocate shadow page tables in the first |
| 3384 | * 4GB of memory, which happens to fit the DMA32 zone. |
| 3385 | */ |
| 3386 | page = alloc_page(GFP_KERNEL | __GFP_DMA32); |
| 3387 | if (!page) |
Wei Yongjun | d7fa6ab | 2010-01-22 16:55:05 +0800 | [diff] [blame] | 3388 | return -ENOMEM; |
| 3389 | |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3390 | vcpu->arch.mmu.pae_root = page_address(page); |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3391 | for (i = 0; i < 4; ++i) |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3392 | vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; |
Avi Kivity | 17ac10a | 2007-01-05 16:36:40 -0800 | [diff] [blame] | 3393 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3394 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3395 | } |
| 3396 | |
Ingo Molnar | 8018c27 | 2006-12-29 16:50:01 -0800 | [diff] [blame] | 3397 | int kvm_mmu_create(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3398 | { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3399 | ASSERT(vcpu); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3400 | ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3401 | |
Ingo Molnar | 8018c27 | 2006-12-29 16:50:01 -0800 | [diff] [blame] | 3402 | return alloc_mmu_pages(vcpu); |
| 3403 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3404 | |
Ingo Molnar | 8018c27 | 2006-12-29 16:50:01 -0800 | [diff] [blame] | 3405 | int kvm_mmu_setup(struct kvm_vcpu *vcpu) |
| 3406 | { |
| 3407 | ASSERT(vcpu); |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3408 | ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
Avi Kivity | 2c26495 | 2006-12-22 01:05:28 -0800 | [diff] [blame] | 3409 | |
Ingo Molnar | 8018c27 | 2006-12-29 16:50:01 -0800 | [diff] [blame] | 3410 | return init_kvm_mmu(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3411 | } |
| 3412 | |
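/*
 * Write-protect every spte backing the given memory slot so that dirty
 * logging sees the next write to each page.  Large sptes are dropped
 * rather than split; they are rebuilt as needed on the next fault.
 */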
Avi Kivity | 90cb052 | 2007-07-17 13:04:56 +0300 | [diff] [blame] | 3413 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3414 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3415 | struct kvm_mmu_page *sp; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3416 | |
Zhang Xiantao | f05e70a | 2007-12-14 10:01:48 +0800 | [diff] [blame] | 3417 | list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) { |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3418 | int i; |
| 3419 | u64 *pt; |
| 3420 | |
Sheng Yang | 291f26b | 2008-10-16 17:30:57 +0800 | [diff] [blame] | 3421 | if (!test_bit(slot, sp->slot_bitmap)) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3422 | continue; |
| 3423 | |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3424 | pt = sp->spt; |
Avi Kivity | 8234b22 | 2010-12-27 12:08:45 +0200 | [diff] [blame] | 3425 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { |
Xiao Guangrong | da8dc75 | 2011-03-04 18:56:41 +0800 | [diff] [blame] | 3426 | if (!is_shadow_present_pte(pt[i]) || |
| 3427 | !is_last_spte(pt[i], sp->role.level)) |
| 3428 | continue; |
| 3429 | |
| 3430 | if (is_large_pte(pt[i])) { |
Avi Kivity | 8234b22 | 2010-12-27 12:08:45 +0200 | [diff] [blame] | 3431 | drop_spte(kvm, &pt[i], |
| 3432 | shadow_trap_nonpresent_pte); |
| 3433 | --kvm->stat.lpages; |
Xiao Guangrong | da8dc75 | 2011-03-04 18:56:41 +0800 | [diff] [blame] | 3434 | continue; |
Avi Kivity | 8234b22 | 2010-12-27 12:08:45 +0200 | [diff] [blame] | 3435 | } |
Xiao Guangrong | da8dc75 | 2011-03-04 18:56:41 +0800 | [diff] [blame] | 3436 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3437 | /* avoid RMW */ |
Gui Jianfeng | 01c168a | 2010-05-27 16:09:48 +0800 | [diff] [blame] | 3438 | if (is_writable_pte(pt[i])) |
Takuya Yoshikawa | 700e1b1 | 2010-12-06 01:11:33 +0900 | [diff] [blame] | 3439 | update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK); |
Avi Kivity | 8234b22 | 2010-12-27 12:08:45 +0200 | [diff] [blame] | 3440 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3441 | } |
Avi Kivity | 171d595 | 2008-08-27 16:40:51 +0300 | [diff] [blame] | 3442 | kvm_flush_remote_tlbs(kvm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3443 | } |
Avi Kivity | 37a7d8b | 2007-01-05 16:36:56 -0800 | [diff] [blame] | 3444 | |
Avi Kivity | 90cb052 | 2007-07-17 13:04:56 +0300 | [diff] [blame] | 3445 | void kvm_mmu_zap_all(struct kvm *kvm) |
Dor Laor | e0fa826 | 2007-03-30 13:06:33 +0300 | [diff] [blame] | 3446 | { |
Avi Kivity | 4db3531 | 2007-11-21 15:28:32 +0200 | [diff] [blame] | 3447 | struct kvm_mmu_page *sp, *node; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3448 | LIST_HEAD(invalid_list); |
Dor Laor | e0fa826 | 2007-03-30 13:06:33 +0300 | [diff] [blame] | 3449 | |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 3450 | spin_lock(&kvm->mmu_lock); |
Xiao Guangrong | 3246af0 | 2010-04-16 16:35:54 +0800 | [diff] [blame] | 3451 | restart: |
Zhang Xiantao | f05e70a | 2007-12-14 10:01:48 +0800 | [diff] [blame] | 3452 | list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3453 | if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list)) |
Xiao Guangrong | 3246af0 | 2010-04-16 16:35:54 +0800 | [diff] [blame] | 3454 | goto restart; |
| 3455 | |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3456 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Marcelo Tosatti | aaee2c9 | 2007-12-20 19:18:26 -0500 | [diff] [blame] | 3457 | spin_unlock(&kvm->mmu_lock); |
Dor Laor | e0fa826 | 2007-03-30 13:06:33 +0300 | [diff] [blame] | 3458 | } |
| 3459 | |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3460 | static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm, |
| 3461 | struct list_head *invalid_list) |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3462 | { |
| 3463 | struct kvm_mmu_page *page; |
| 3464 | |
| 3465 | page = container_of(kvm->arch.active_mmu_pages.prev, |
| 3466 | struct kvm_mmu_page, link); |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3467 | return kvm_mmu_prepare_zap_page(kvm, page, invalid_list); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3468 | } |
| 3469 | |
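/*
 * Memory-pressure callback.  When sc->nr_to_scan is zero the shrinker
 * core only wants the current object count; otherwise zap one shadow
 * page from the first VM that has any and rotate that VM to the tail of
 * vm_list, spreading repeated pressure across guests.
 */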
Ying Han | 1495f23 | 2011-05-24 17:12:27 -0700 | [diff] [blame] | 3470 | static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3471 | { |
| 3472 | struct kvm *kvm; |
| 3473 | struct kvm *kvm_freed = NULL; |
Ying Han | 1495f23 | 2011-05-24 17:12:27 -0700 | [diff] [blame] | 3474 | int nr_to_scan = sc->nr_to_scan; |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 3475 | |
| 3476 | if (nr_to_scan == 0) |
| 3477 | goto out; |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3478 | |
Jan Kiszka | e935b83 | 2011-02-08 12:55:33 +0100 | [diff] [blame] | 3479 | raw_spin_lock(&kvm_lock); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3480 | |
| 3481 | list_for_each_entry(kvm, &vm_list, vm_list) { |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 3482 | int idx, freed_pages; |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3483 | LIST_HEAD(invalid_list); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3484 | |
Marcelo Tosatti | f656ce0 | 2009-12-23 14:35:25 -0200 | [diff] [blame] | 3485 | idx = srcu_read_lock(&kvm->srcu); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3486 | spin_lock(&kvm->mmu_lock); |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 3487 | if (!kvm_freed && nr_to_scan > 0 && |
| 3488 | kvm->arch.n_used_mmu_pages > 0) { |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3489 | freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm, |
| 3490 | &invalid_list); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3491 | kvm_freed = kvm; |
| 3492 | } |
| 3493 | nr_to_scan--; |
| 3494 | |
Xiao Guangrong | d98ba05 | 2010-06-04 21:55:29 +0800 | [diff] [blame] | 3495 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3496 | spin_unlock(&kvm->mmu_lock); |
Marcelo Tosatti | f656ce0 | 2009-12-23 14:35:25 -0200 | [diff] [blame] | 3497 | srcu_read_unlock(&kvm->srcu, idx); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3498 | } |
| 3499 | if (kvm_freed) |
| 3500 | list_move_tail(&kvm_freed->vm_list, &vm_list); |
| 3501 | |
Jan Kiszka | e935b83 | 2011-02-08 12:55:33 +0100 | [diff] [blame] | 3502 | raw_spin_unlock(&kvm_lock); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3503 | |
Dave Hansen | 45221ab | 2010-08-19 18:11:37 -0700 | [diff] [blame] | 3504 | out: |
| 3505 | return percpu_counter_read_positive(&kvm_total_used_mmu_pages); |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3506 | } |
| 3507 | |
| 3508 | static struct shrinker mmu_shrinker = { |
| 3509 | .shrink = mmu_shrink, |
| 3510 | .seeks = DEFAULT_SEEKS * 10, |
| 3511 | }; |
| 3512 | |
Ingo Molnar | 2ddfd20 | 2008-05-22 10:37:48 +0200 | [diff] [blame] | 3513 | static void mmu_destroy_caches(void) |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 3514 | { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 3515 | if (pte_list_desc_cache) |
| 3516 | kmem_cache_destroy(pte_list_desc_cache); |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 3517 | if (mmu_page_header_cache) |
| 3518 | kmem_cache_destroy(mmu_page_header_cache); |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 3519 | } |
| 3520 | |
| 3521 | int kvm_mmu_module_init(void) |
| 3522 | { |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 3523 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", |
| 3524 | sizeof(struct pte_list_desc), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3525 | 0, 0, NULL); |
Xiao Guangrong | 53c07b1 | 2011-05-15 23:26:20 +0800 | [diff] [blame] | 3526 | if (!pte_list_desc_cache) |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 3527 | goto nomem; |
| 3528 | |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 3529 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", |
| 3530 | sizeof(struct kvm_mmu_page), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3531 | 0, 0, NULL); |
Avi Kivity | d3d25b0 | 2007-05-30 12:34:53 +0300 | [diff] [blame] | 3532 | if (!mmu_page_header_cache) |
| 3533 | goto nomem; |
| 3534 | |
Wei Yongjun | 45bf21a | 2010-08-23 16:13:15 +0800 | [diff] [blame] | 3535 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0)) |
| 3536 | goto nomem; |
| 3537 | |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3538 | register_shrinker(&mmu_shrinker); |
| 3539 | |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 3540 | return 0; |
| 3541 | |
| 3542 | nomem: |
Izik Eidus | 3ee16c8 | 2008-03-30 15:17:21 +0300 | [diff] [blame] | 3543 | mmu_destroy_caches(); |
Avi Kivity | b5a33a7 | 2007-04-15 16:31:09 +0300 | [diff] [blame] | 3544 | return -ENOMEM; |
| 3545 | } |
| 3546 | |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 3547 | /* |
| 3548 | * Calculate the number of mmu pages needed for kvm. |
| 3549 | */ |
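/*
 * For example, a guest with 1GB of memory (262144 4K pages) gets
 * 262144 * 20 / 1000 = 5242 shadow pages with KVM_PERMILLE_MMU_PAGES at
 * its usual value of 20, comfortably above KVM_MIN_ALLOC_MMU_PAGES.
 */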
| 3550 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) |
| 3551 | { |
| 3552 | int i; |
| 3553 | unsigned int nr_mmu_pages; |
| 3554 | unsigned int nr_pages = 0; |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 3555 | struct kvm_memslots *slots; |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 3556 | |
Lai Jiangshan | 90d83dc | 2010-04-19 17:41:23 +0800 | [diff] [blame] | 3557 | slots = kvm_memslots(kvm); |
| 3558 | |
Marcelo Tosatti | bc6678a | 2009-12-23 14:35:21 -0200 | [diff] [blame] | 3559 | for (i = 0; i < slots->nmemslots; i++) |
| 3560 | nr_pages += slots->memslots[i].npages; |
Zhang Xiantao | 3ad82a7 | 2007-11-20 13:11:38 +0800 | [diff] [blame] | 3561 | |
| 3562 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; |
| 3563 | nr_mmu_pages = max(nr_mmu_pages, |
| 3564 | (unsigned int) KVM_MIN_ALLOC_MMU_PAGES); |
| 3565 | |
| 3566 | return nr_mmu_pages; |
| 3567 | } |
| 3568 | |
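/*
 * Paravirtual MMU batching, reached via the KVM_HC_MMU_OP hypercall: the
 * guest queues kvm_mmu_op_* records in a buffer that is copied in below
 * and consumed one header-tagged operation at a time (write pte, flush
 * tlb, release page table).
 */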
Marcelo Tosatti | 2f333bc | 2008-02-22 12:21:37 -0500 | [diff] [blame] | 3569 | static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer, |
| 3570 | unsigned len) |
| 3571 | { |
| 3572 | if (len > buffer->len) |
| 3573 | return NULL; |
| 3574 | return buffer->ptr; |
| 3575 | } |
| 3576 | |
| 3577 | static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer, |
| 3578 | unsigned len) |
| 3579 | { |
| 3580 | void *ret; |
| 3581 | |
| 3582 | ret = pv_mmu_peek_buffer(buffer, len); |
| 3583 | if (!ret) |
| 3584 | return ret; |
| 3585 | buffer->ptr += len; |
| 3586 | buffer->len -= len; |
| 3587 | buffer->processed += len; |
| 3588 | return ret; |
| 3589 | } |
| 3590 | |
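| | /* |
| |  * Perform a guest PTE write on its behalf.  The write is 8 bytes wide |
| |  * when the guest runs in long mode or with PAE (64-bit PTEs), and |
| |  * 4 bytes for legacy 32-bit paging. |
| |  */ |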
| 3591 | static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu, |
| 3592 | gpa_t addr, gpa_t value) |
| 3593 | { |
| 3594 | int bytes = 8; |
| 3595 | int r; |
| 3596 | |
| 3597 | if (!is_long_mode(vcpu) && !is_pae(vcpu)) |
| 3598 | bytes = 4; |
| 3599 | |
| 3600 | r = mmu_topup_memory_caches(vcpu); |
| 3601 | if (r) |
| 3602 | return r; |
| 3603 | |
Marcelo Tosatti | 3200f40 | 2008-03-29 20:17:59 -0300 | [diff] [blame] | 3604 | if (!emulator_write_phys(vcpu, addr, &value, bytes)) |
Marcelo Tosatti | 2f333bc | 2008-02-22 12:21:37 -0500 | [diff] [blame] | 3605 | return -EFAULT; |
| 3606 | |
| 3607 | return 1; |
| 3608 | } |
| 3609 | |
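| | /* |
| |  * Writing the current CR3 value back is sufficient for a guest TLB |
| |  * flush: kvm_set_cr3() resyncs the shadow roots and flushes the TLB |
| |  * when the value is unchanged. |
| |  */ |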
| 3610 | static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) |
| 3611 | { |
Avi Kivity | 9f8fe50 | 2010-12-05 17:30:00 +0200 | [diff] [blame] | 3612 | (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu)); |
Marcelo Tosatti | 2f333bc | 2008-02-22 12:21:37 -0500 | [diff] [blame] | 3613 | return 1; |
| 3614 | } |
| 3615 | |
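| | /* |
| |  * The guest is releasing a page-table page at guest-physical addr; |
| |  * drop any shadow pages built for it (mmu_unshadow() takes a gfn, |
| |  * hence the PAGE_SHIFT). |
| |  */ |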
| 3616 | static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr) |
| 3617 | { |
| 3618 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3619 | mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT); |
| 3620 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3621 | return 1; |
| 3622 | } |
| 3623 | |
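| | /* |
| |  * Decode and apply one record from the op buffer.  Each record is a |
| |  * struct kvm_mmu_op_header giving the opcode, followed by an |
| |  * op-specific payload, e.g. pte_phys/pte_val for WRITE_PTE.  An |
| |  * unknown or truncated record returns 0, which stops processing. |
| |  */ |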
| 3624 | static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu, |
| 3625 | struct kvm_pv_mmu_op_buffer *buffer) |
| 3626 | { |
| 3627 | struct kvm_mmu_op_header *header; |
| 3628 | |
| 3629 | header = pv_mmu_peek_buffer(buffer, sizeof *header); |
| 3630 | if (!header) |
| 3631 | return 0; |
| 3632 | switch (header->op) { |
| 3633 | case KVM_MMU_OP_WRITE_PTE: { |
| 3634 | struct kvm_mmu_op_write_pte *wpte; |
| 3635 | |
| 3636 | wpte = pv_mmu_read_buffer(buffer, sizeof *wpte); |
| 3637 | if (!wpte) |
| 3638 | return 0; |
| 3639 | return kvm_pv_mmu_write(vcpu, wpte->pte_phys, |
| 3640 | wpte->pte_val); |
| 3641 | } |
| 3642 | case KVM_MMU_OP_FLUSH_TLB: { |
| 3643 | struct kvm_mmu_op_flush_tlb *ftlb; |
| 3644 | |
| 3645 | ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb); |
| 3646 | if (!ftlb) |
| 3647 | return 0; |
| 3648 | return kvm_pv_mmu_flush_tlb(vcpu); |
| 3649 | } |
| 3650 | case KVM_MMU_OP_RELEASE_PT: { |
| 3651 | struct kvm_mmu_op_release_pt *rpt; |
| 3652 | |
| 3653 | rpt = pv_mmu_read_buffer(buffer, sizeof *rpt); |
| 3654 | if (!rpt) |
| 3655 | return 0; |
| 3656 | return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys); |
| 3657 | } |
| 3658 | default: return 0; |
| 3659 | } |
| 3660 | } |
| 3661 | |
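| | /* |
| |  * Top-level handler for the pv-mmu hypercall (the KVM_HC_MMU_OP path |
| |  * dispatched from x86.c): copy in at most sizeof(buffer->buf) bytes of |
| |  * guest ops, apply them one by one, and report the number of bytes |
| |  * consumed through *ret. |
| |  */ |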
| 3662 | int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes, |
| 3663 | gpa_t addr, unsigned long *ret) |
| 3664 | { |
| 3665 | int r; |
Dave Hansen | 6ad18fb | 2008-08-11 10:01:49 -0700 | [diff] [blame] | 3666 | struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer; |
Marcelo Tosatti | 2f333bc | 2008-02-22 12:21:37 -0500 | [diff] [blame] | 3667 | |
Dave Hansen | 6ad18fb | 2008-08-11 10:01:49 -0700 | [diff] [blame] | 3668 | buffer->ptr = buffer->buf; |
| 3669 | buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf); |
| 3670 | buffer->processed = 0; |
Marcelo Tosatti | 2f333bc | 2008-02-22 12:21:37 -0500 | [diff] [blame] | 3671 | |
Dave Hansen | 6ad18fb | 2008-08-11 10:01:49 -0700 | [diff] [blame] | 3672 | r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len); |
Marcelo Tosatti | 2f333bc | 2008-02-22 12:21:37 -0500 | [diff] [blame] | 3673 | if (r) |
| 3674 | goto out; |
| 3675 | |
Dave Hansen | 6ad18fb | 2008-08-11 10:01:49 -0700 | [diff] [blame] | 3676 | while (buffer->len) { |
| 3677 | r = kvm_pv_mmu_op_one(vcpu, buffer); |
Marcelo Tosatti | 2f333bc | 2008-02-22 12:21:37 -0500 | [diff] [blame] | 3678 | if (r < 0) |
| 3679 | goto out; |
| 3680 | if (r == 0) |
| 3681 | break; |
| 3682 | } |
| 3683 | |
| 3684 | r = 1; |
| 3685 | out: |
Dave Hansen | 6ad18fb | 2008-08-11 10:01:49 -0700 | [diff] [blame] | 3686 | *ret = buffer->processed; |
Marcelo Tosatti | 2f333bc | 2008-02-22 12:21:37 -0500 | [diff] [blame] | 3687 | return r; |
| 3688 | } |
| 3689 | |
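| | /* |
| |  * Snapshot the shadow (or EPT) entries mapping @addr, one per level, |
| |  * into @sptes; sptes[level - 1] holds the entry for that level, and |
| |  * the walk stops at the first non-present entry.  A caller such as the |
| |  * EPT misconfig handler in vmx.c uses it roughly like this (a sketch; |
| |  * inspect_spte is a hypothetical helper): |
| |  * |
| |  *	u64 sptes[4]; |
| |  *	int i, nr = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes); |
| |  * |
| |  *	for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr; --i) |
| |  *		inspect_spte(sptes[i - 1], i); |
| |  */ |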
Marcelo Tosatti | 94d8b05 | 2009-06-11 12:07:42 -0300 | [diff] [blame] | 3690 | int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) |
| 3691 | { |
| 3692 | struct kvm_shadow_walk_iterator iterator; |
| 3693 | int nr_sptes = 0; |
| 3694 | |
| 3695 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3696 | for_each_shadow_entry(vcpu, addr, iterator) { |
| 3697 | sptes[iterator.level-1] = *iterator.sptep; |
| 3698 | nr_sptes++; |
| 3699 | if (!is_shadow_present_pte(*iterator.sptep)) |
| 3700 | break; |
| 3701 | } |
| 3702 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3703 | |
| 3704 | return nr_sptes; |
| 3705 | } |
| 3706 | EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); |
| 3707 | |
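| | /* |
| |  * Per-vcpu teardown: drop the active paging context, free the root |
| |  * page tables, then release the per-vcpu memory caches that were |
| |  * topped up for page-fault handling. |
| |  */ |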
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 3708 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu) |
| 3709 | { |
| 3710 | ASSERT(vcpu); |
| 3711 | |
| 3712 | destroy_kvm_mmu(vcpu); |
| 3713 | free_mmu_pages(vcpu); |
| 3714 | mmu_free_memory_caches(vcpu); |
Xiao Guangrong | b034cf0 | 2010-12-23 16:08:35 +0800 | [diff] [blame] | 3715 | } |
| 3716 | |
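| | /* |
| |  * With CONFIG_KVM_MMU_AUDIT the auditing code is pulled in by textual |
| |  * inclusion; without it, a no-op stub keeps the call in |
| |  * kvm_mmu_module_exit() unconditional. |
| |  */ |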
| 3717 | #ifdef CONFIG_KVM_MMU_AUDIT |
| 3718 | #include "mmu_audit.c" |
| 3719 | #else |
| 3720 | static void mmu_audit_disable(void) { } |
| 3721 | #endif |
| 3722 | |
| 3723 | void kvm_mmu_module_exit(void) |
| 3724 | { |
| 3725 | mmu_destroy_caches(); |
| 3726 | percpu_counter_destroy(&kvm_total_used_mmu_pages); |
| 3727 | unregister_shrinker(&mmu_shrinker); |
Xiao Guangrong | c42fffe | 2010-09-27 18:07:07 +0800 | [diff] [blame] | 3728 | mmu_audit_disable(); |
| 3729 | } |