/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/ratelimit.h>

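/*
 * This file is not built standalone; it is #included from mmu.c when
 * CONFIG_KVM_MMU_AUDIT is enabled, which is why <linux/ratelimit.h> is
 * the only explicit include here. The strings below are indexed by the
 * AUDIT_* audit-point values (AUDIT_PRE_PAGE_FAULT ... AUDIT_POST_SYNC),
 * assumed to be defined in mmu.h in this same order.
 */
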
char const *audit_point_name[] = {
        "pre page fault",
        "post page fault",
        "pre pte write",
        "post pte write",
        "pre sync",
        "post sync"
};

#define audit_printk(kvm, fmt, args...)         \
        printk(KERN_ERR "audit: (%s) error: "   \
               fmt, audit_point_name[kvm->arch.audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

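/*
 * Recursively visit every spte reachable from @sp, calling @fn on each
 * entry before descending into any non-leaf child it points at.
 */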
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            inspect_spte_fn fn, int level)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                u64 *ent = sp->spt;

                fn(vcpu, ent + i, level);

                if (is_shadow_present_pte(ent[i]) &&
                    !is_last_spte(ent[i], level)) {
                        struct kvm_mmu_page *child;

                        child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
                        __mmu_spte_walk(vcpu, child, fn, level - 1);
                }
        }
}

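/*
 * Walk every spte of the vcpu's current paging structure: either the
 * single 4-level root, or the four PAE roots, which point at level-2
 * page directories.
 */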
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
        int i;
        struct kvm_mmu_page *sp;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;

        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;

                sp = page_header(root);
                __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
                return;
        }

        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        __mmu_spte_walk(vcpu, sp, fn, 2);
                }
        }
}

typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

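/*
 * VM-wide counterpart of mmu_spte_walk(): apply @fn to every shadow
 * page on the VM's active list, independent of any vcpu's roots.
 */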
static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
                fn(kvm, sp);
}

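/*
 * Check that a present leaf spte points at the host pfn that its gfn
 * currently translates to; a mismatch means the shadow mapping is
 * stale. Unsync pages are only legal at the last level.
 */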
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        kvm_pfn_t pfn;
        hpa_t hpa;

        sp = page_header(__pa(sptep));

        if (sp->unsync) {
                if (level != PT_PAGE_TABLE_LEVEL) {
                        audit_printk(vcpu->kvm, "unsync sp: %p "
                                     "level = %d\n", sp, level);
                        return;
                }
        }

        if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
                return;

        gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
        pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);

        if (is_error_pfn(pfn))
                return;

        hpa = pfn << PAGE_SHIFT;
        if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
                audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
                             "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
                             hpa, *sptep);
}

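/*
 * Every present leaf spte must be reachable through the rmap of the
 * gfn it maps; complain (rate-limited) if either the memslot or the
 * rmap entry is missing.
 */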
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
        struct kvm_rmap_head *rmap_head;
        struct kvm_mmu_page *rev_sp;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *slot;
        gfn_t gfn;

        rev_sp = page_header(__pa(sptep));
        gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

        slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
        slot = __gfn_to_memslot(slots, gfn);
        if (!slot) {
                if (!__ratelimit(&ratelimit_state))
                        return;
                audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
                audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
                             (long int)(sptep - rev_sp->spt), rev_sp->gfn);
                dump_stack();
                return;
        }

        rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
        if (!rmap_head->val) {
                if (!__ratelimit(&ratelimit_state))
                        return;
                audit_printk(kvm, "no rmap for writable spte %llx\n",
                             *sptep);
                dump_stack();
        }
}

static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
                inspect_spte_has_rmap(vcpu->kvm, sptep);
}

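/*
 * After a root has been synced (AUDIT_POST_SYNC) no shadow page
 * reachable from it should still be marked unsync.
 */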
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp = page_header(__pa(sptep));

        if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
                audit_printk(vcpu->kvm, "found unsync sp(%p) after sync "
                             "root.\n", sp);
}

static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        int i;

        if (sp->role.level != PT_PAGE_TABLE_LEVEL)
                return;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (!is_shadow_present_pte(sp->spt[i]))
                        continue;

                inspect_spte_has_rmap(kvm, sp->spt + i);
        }
}

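/*
 * A synced, non-direct, valid shadow page implies its gfn has been
 * write-protected: no rmap entry for that gfn may still be writable.
 */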
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        struct kvm_rmap_head *rmap_head;
        u64 *sptep;
        struct rmap_iterator iter;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *slot;

        if (sp->role.direct || sp->unsync || sp->role.invalid)
                return;

        slots = kvm_memslots_for_spte_role(kvm, sp->role);
        slot = __gfn_to_memslot(slots, sp->gfn);
        rmap_head = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);

        for_each_rmap_spte(rmap_head, &iter, sptep) {
                if (is_writable_pte(*sptep))
                        audit_printk(kvm, "shadow page has writable "
                                     "mappings: gfn %llx role %x\n",
                                     sp->gfn, sp->role.word);
        }
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        check_mappings_rmap(kvm, sp);
        audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
        walk_all_active_sps(kvm, audit_sp);
}

static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        audit_sptes_have_rmaps(vcpu, sptep, level);
        audit_mappings(vcpu, sptep, level);
        audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
        mmu_spte_walk(vcpu, audit_spte);
}

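/*
 * mmu_audit records the requested state; mmu_audit_key is a static key
 * so that kvm_mmu_audit() compiles down to a patched no-op branch while
 * auditing is disabled, keeping the MMU fast paths free of overhead.
 */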
static bool mmu_audit;
static struct static_key mmu_audit_key;

static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!__ratelimit(&ratelimit_state))
                return;

        vcpu->kvm->arch.audit_point = point;
        audit_all_active_sps(vcpu->kvm);
        audit_vcpu_spte(vcpu);
}

static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
        if (static_key_false(&mmu_audit_key))
                __kvm_mmu_audit(vcpu, point);
}

static void mmu_audit_enable(void)
{
        if (mmu_audit)
                return;

        static_key_slow_inc(&mmu_audit_key);
        mmu_audit = true;
}

static void mmu_audit_disable(void)
{
        if (!mmu_audit)
                return;

        static_key_slow_dec(&mmu_audit_key);
        mmu_audit = false;
}

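/*
 * "mmu_audit" is exposed as a writable module parameter; writing "1"
 * or "0" flips the static key at runtime. With this code built into
 * the kvm module the control file is typically (path assumed, it
 * depends on how the arch module is packaged):
 *
 *      echo 1 > /sys/module/kvm/parameters/mmu_audit
 */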
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
        int ret;
        unsigned long enable;

        ret = kstrtoul(val, 10, &enable);
        if (ret < 0)
                return -EINVAL;

        switch (enable) {
        case 0:
                mmu_audit_disable();
                break;
        case 1:
                mmu_audit_enable();
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static const struct kernel_param_ops audit_param_ops = {
        .set = mmu_audit_set,
        .get = param_get_bool,
};

arch_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
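
/*
 * Expected usage (a sketch, not part of this file): mmu.c brackets the
 * operations named in audit_point_name[] with calls such as
 *
 *      kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 *      ... handle the page fault ...
 *      kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 *
 * so each audit point costs only a static-key test until mmu_audit is
 * enabled.
 */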