/*
 * Support KVM guest page tracking
 *
 * This feature allows us to track page access in the guest. Currently, only
 * write access is tracked.
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_host.h>
#include <asm/kvm_page_track.h>

#include "mmu.h"

void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
                                 struct kvm_memory_slot *dont)
{
        int i;

        for (i = 0; i < KVM_PAGE_TRACK_MAX; i++)
                if (!dont || free->arch.gfn_track[i] !=
                             dont->arch.gfn_track[i]) {
                        kvfree(free->arch.gfn_track[i]);
                        free->arch.gfn_track[i] = NULL;
                }
}

int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
                                  unsigned long npages)
{
        int i;

        for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
                slot->arch.gfn_track[i] = kvm_kvzalloc(npages *
                                        sizeof(*slot->arch.gfn_track[i]));
                if (!slot->arch.gfn_track[i])
                        goto track_free;
        }

        return 0;

track_free:
        kvm_page_track_free_memslot(slot, NULL);
        return -ENOMEM;
}
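
/*
 * Illustrative sketch only (an assumption about the caller, not code that
 * lives in this file): the arch memslot code is expected to pair the two
 * helpers above, roughly as follows, so that one 16-bit counter per tracking
 * mode exists for every guest page in the slot:
 *
 *	if (kvm_page_track_create_memslot(slot, npages))
 *		goto out_free;
 *	...
 *	kvm_page_track_free_memslot(free, dont);
 */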

static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
{
        if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
                return false;

        return true;
}

static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
                             enum kvm_page_track_mode mode, short count)
{
        int index, val;

        index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);

        val = slot->arch.gfn_track[mode][index];

        if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
                return;

        slot->arch.gfn_track[mode][index] += count;
}
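
/*
 * Worked example (an illustration, assuming 4K tracking granularity): at
 * PT_PAGE_TABLE_LEVEL, gfn_to_index() reduces to gfn - slot->base_gfn, so
 * with base_gfn == 0x100, adding write tracking for gfn 0x105 twice and
 * removing it once leaves
 *
 *	slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE][5] == 1
 *
 * i.e. the page stays write-tracked until the last user drops it.
 */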

/*
 * Add a guest page to the tracking pool so that the corresponding access on
 * that page will be intercepted.
 *
 * It should be called under the protection of both mmu-lock and kvm->srcu
 * or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memslot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write tracking is supported.
 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
                                  struct kvm_memory_slot *slot, gfn_t gfn,
                                  enum kvm_page_track_mode mode)
{
        if (WARN_ON(!page_track_mode_is_valid(mode)))
                return;

        update_gfn_track(slot, gfn, mode, 1);

        /*
         * new track stops large page mapping for the
         * tracked page.
         */
        kvm_mmu_gfn_disallow_lpage(slot, gfn);

        if (mode == KVM_PAGE_TRACK_WRITE)
                if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
                        kvm_flush_remote_tlbs(kvm);
}
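
/*
 * A minimal usage sketch (an assumption for illustration, not a caller that
 * lives in this file; the example_* name is hypothetical). The caller is
 * expected to hold kvm->srcu or kvm->slots_lock in addition to mmu_lock, as
 * documented above.
 */
static void __maybe_unused example_write_track_gfn(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        slot = gfn_to_memslot(kvm, gfn);
        if (!slot)
                return;

        spin_lock(&kvm->mmu_lock);
        kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
        spin_unlock(&kvm->mmu_lock);
}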

/*
 * Remove the guest page from the tracking pool, which stops the interception
 * of the corresponding access on that page. It is the opposite operation of
 * kvm_slot_page_track_add_page().
 *
 * It should be called under the protection of both mmu-lock and kvm->srcu
 * or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memslot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write tracking is supported.
 */
void kvm_slot_page_track_remove_page(struct kvm *kvm,
                                     struct kvm_memory_slot *slot, gfn_t gfn,
                                     enum kvm_page_track_mode mode)
{
        if (WARN_ON(!page_track_mode_is_valid(mode)))
                return;

        update_gfn_track(slot, gfn, mode, -1);

        /*
         * allow large page mapping for the tracked page
         * after the tracker is gone.
         */
        kvm_mmu_gfn_allow_lpage(slot, gfn);
}
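
/*
 * The matching teardown for the hypothetical sketch above, with the same
 * locking assumptions: every add must eventually be balanced by a remove,
 * since the value kept in gfn_track is a reference count.
 */
static void __maybe_unused example_write_untrack_gfn(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        slot = gfn_to_memslot(kvm, gfn);
        if (!slot)
                return;

        spin_lock(&kvm->mmu_lock);
        kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
        spin_unlock(&kvm->mmu_lock);
}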

/*
 * check if the corresponding access on the specified guest page is tracked.
 */
bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
                              enum kvm_page_track_mode mode)
{
        struct kvm_memory_slot *slot;
        int index;

        if (WARN_ON(!page_track_mode_is_valid(mode)))
                return false;

        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        if (!slot)
                return false;

        index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
        return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
}
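
/*
 * Hypothetical fault-path check (a sketch of the intended use, not code from
 * this file): a write fault on a write-tracked gfn cannot simply be fixed up
 * by the page fault handler; it has to be emulated so that the notifier
 * chain in kvm_page_track_write() sees the access.
 */
static bool __maybe_unused example_fault_on_tracked_page(struct kvm_vcpu *vcpu,
                                                         gfn_t gfn)
{
        return kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE);
}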

void kvm_page_track_init(struct kvm *kvm)
{
        struct kvm_page_track_notifier_head *head;

        head = &kvm->arch.track_notifier_head;
        init_srcu_struct(&head->track_srcu);
        INIT_HLIST_HEAD(&head->track_notifier_list);
}
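
/*
 * Note (an assumption about the caller, not enforced here):
 * kvm_page_track_init() is expected to run once during VM creation, before
 * any notifier is registered and before any page is added to the tracking
 * pool.
 */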

/*
 * Register the notifier so that the events intercepted on the tracked guest
 * pages can be received.
 */
void
kvm_page_track_register_notifier(struct kvm *kvm,
                                 struct kvm_page_track_notifier_node *n)
{
        struct kvm_page_track_notifier_head *head;

        head = &kvm->arch.track_notifier_head;

        spin_lock(&kvm->mmu_lock);
        hlist_add_head_rcu(&n->node, &head->track_notifier_list);
        spin_unlock(&kvm->mmu_lock);
}
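
/*
 * A minimal registration sketch (the example_* names are hypothetical): a
 * user fills in a notifier node with its track_write callback and registers
 * it; the callback then runs for every emulated write to a tracked page.
 */
static void example_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                                const u8 *new, int bytes)
{
        /* filter for the gpa range this user cares about, then react */
}

static struct kvm_page_track_notifier_node example_notifier_node = {
        .track_write = example_track_write,
};

static void __maybe_unused example_start_tracking(struct kvm *kvm)
{
        kvm_page_track_register_notifier(kvm, &example_notifier_node);
        /* later: kvm_page_track_unregister_notifier(kvm, &example_notifier_node); */
}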

/*
 * Stop receiving the intercepted events. It is the opposite operation of
 * kvm_page_track_register_notifier().
 */
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
                                   struct kvm_page_track_notifier_node *n)
{
        struct kvm_page_track_notifier_head *head;

        head = &kvm->arch.track_notifier_head;

        spin_lock(&kvm->mmu_lock);
        hlist_del_rcu(&n->node);
        spin_unlock(&kvm->mmu_lock);
        synchronize_srcu(&head->track_srcu);
}

/*
 * Notify the nodes that a write access has been intercepted and that write
 * emulation has finished at this point.
 *
 * Each node has to figure out by itself whether the written page is one it
 * is interested in.
 */
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
                          int bytes)
{
        struct kvm_page_track_notifier_head *head;
        struct kvm_page_track_notifier_node *n;
        int idx;

        head = &vcpu->kvm->arch.track_notifier_head;

        if (hlist_empty(&head->track_notifier_list))
                return;

        idx = srcu_read_lock(&head->track_srcu);
        hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
                if (n->track_write)
                        n->track_write(vcpu, gpa, new, bytes);
        srcu_read_unlock(&head->track_srcu, idx);
}
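
/*
 * Illustrative caller sketch (an assumption: the real caller lives in the
 * emulator paths, not in this file; the example_* name is hypothetical). The
 * write is committed to guest memory first, and only then are the trackers
 * notified, so the callbacks always see the final data.
 */
static int __maybe_unused example_emulated_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                                                 const void *val, int bytes)
{
        if (kvm_vcpu_write_guest(vcpu, gpa, val, bytes) < 0)
                return 0;

        kvm_page_track_write(vcpu, gpa, val, bytes);
        return 1;
}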