/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *        Yaniv Kamay   <yaniv@qumranet.com>
 *        Avi Kivity    <avi@qumranet.com>
 *        Marcelo Tosatti <mtosatti@redhat.com>
 *        Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/ratelimit.h>

static int audit_point;

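/*
 * Report an audit failure, tagged with the name of the audit point at
 * which it was detected.  audit_point_name[] is not defined here; this
 * file is #included into mmu.c, which is expected to provide it along
 * with the kvm_mmu_audit tracepoint used further below.
 */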
#define audit_printk(fmt, args...)                \
        printk(KERN_ERR "audit: (%s) error: "        \
               fmt, audit_point_name[audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

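/*
 * Recursively apply @fn to every SPTE in the subtree rooted at @sp: each
 * of the PT64_ENT_PER_PAGE entries is passed to @fn, and any present,
 * non-leaf entry is then descended into at @level - 1.
 */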
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            inspect_spte_fn fn, int level)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                u64 *ent = sp->spt;

                fn(vcpu, ent + i, level);

                if (is_shadow_present_pte(ent[i]) &&
                    !is_last_spte(ent[i], level)) {
                        struct kvm_mmu_page *child;

                        child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
                        __mmu_spte_walk(vcpu, child, fn, level - 1);
                }
        }
}

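/*
 * Walk every SPTE reachable from the vcpu's current root: a single walk
 * from root_hpa for 4-level paging, otherwise one walk from each of the
 * four PAE page-directory roots (level 2).
 */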
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
        int i;
        struct kvm_mmu_page *sp;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;

        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;

                sp = page_header(root);
                __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
                return;
        }

        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        __mmu_spte_walk(vcpu, sp, fn, 2);
                }
        }
}

typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

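/* Apply @fn to every shadow page on the VM-wide active_mmu_pages list. */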
static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
                fn(kvm, sp);
}

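/*
 * Consistency checks for a single SPTE: unsync pages may only exist at
 * the last level, neither unsync nor direct pages may hold a "notrap"
 * marker, and a present leaf SPTE must point at the host physical page
 * that currently backs its gfn.
 */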
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        pfn_t pfn;
        hpa_t hpa;

        sp = page_header(__pa(sptep));

        if (sp->unsync) {
                if (level != PT_PAGE_TABLE_LEVEL) {
                        audit_printk("unsync sp: %p level = %d\n", sp, level);
                        return;
                }

                if (*sptep == shadow_notrap_nonpresent_pte) {
                        audit_printk("notrap spte in unsync sp: %p\n", sp);
                        return;
                }
        }

        if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
                audit_printk("notrap spte in direct sp: %p\n", sp);
                return;
        }

        if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
                return;

        gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
        pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);
                return;
        }

        hpa = pfn << PAGE_SHIFT;
        if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
                audit_printk("levels %d pfn %llx hpa %llx ent %llx\n",
                             vcpu->arch.mmu.root_level, pfn, hpa, *sptep);
}

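/*
 * Verify that the gfn translated by @sptep still has a memslot and that
 * the SPTE is reachable through that gfn's reverse-map chain; either
 * failure is reported (ratelimited) together with a stack dump.
 */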
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
        unsigned long *rmapp;
        struct kvm_mmu_page *rev_sp;
        gfn_t gfn;

        rev_sp = page_header(__pa(sptep));
        gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

        if (!gfn_to_memslot(kvm, gfn)) {
                if (!printk_ratelimit())
                        return;
                audit_printk("no memslot for gfn %llx\n", gfn);
                audit_printk("index %ld of sp (gfn=%llx)\n",
                             (long int)(sptep - rev_sp->spt), rev_sp->gfn);
                dump_stack();
                return;
        }

        rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
        if (!*rmapp) {
                if (!printk_ratelimit())
                        return;
                audit_printk("no rmap for writable spte %llx\n", *sptep);
                dump_stack();
        }
}

static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
                inspect_spte_has_rmap(vcpu->kvm, sptep);
}

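/*
 * Once the roots have been synced (the AUDIT_POST_SYNC audit point), no
 * reachable shadow page should still be marked unsync.
 */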
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp = page_header(__pa(sptep));

        if (audit_point == AUDIT_POST_SYNC && sp->unsync)
                audit_printk("unsync sp: %p found after sync root\n", sp);
}

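/*
 * For a last-level shadow page, every rmap-eligible SPTE must be present
 * in the reverse map of the gfn it maps.
 */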
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        int i;

        if (sp->role.level != PT_PAGE_TABLE_LEVEL)
                return;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (!is_rmap_spte(sp->spt[i]))
                        continue;

                inspect_spte_has_rmap(kvm, sp->spt + i);
        }
}

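/*
 * A synced, valid, indirect shadow page is supposed to be
 * write-protected; flag any writable SPTE remaining in the rmap of the
 * guest page table's gfn.
 */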
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        u64 *spte;

        if (sp->role.direct || sp->unsync || sp->role.invalid)
                return;

        slot = gfn_to_memslot(kvm, sp->gfn);
        rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                if (is_writable_pte(*spte))
                        audit_printk("shadow page has writable mappings: gfn "
                                     "%llx role %x\n", sp->gfn, sp->role.word);
                spte = rmap_next(kvm, rmapp, spte);
        }
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        check_mappings_rmap(kvm, sp);
        audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
        walk_all_active_sps(kvm, audit_sp);
}

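/* The per-SPTE checks, bundled as an inspect_spte_fn for mmu_spte_walk(). */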
static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        audit_sptes_have_rmaps(vcpu, sptep, level);
        audit_mappings(vcpu, sptep, level);
        audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
        mmu_spte_walk(vcpu, audit_spte);
}

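/*
 * Probe attached to the kvm_mmu_audit tracepoint.  A full audit walks
 * every active shadow page and every SPTE reachable from the vcpu's
 * root, so it is ratelimited to 10 runs per 5 * HZ interval.
 */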
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!__ratelimit(&ratelimit_state))
                return;

        audit_point = point;
        audit_all_active_sps(vcpu->kvm);
        audit_vcpu_spte(vcpu);
}

static bool mmu_audit;

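/*
 * Auditing is switched on and off by (un)registering the probe on the
 * kvm_mmu_audit tracepoint; mmu_audit mirrors the current state for
 * param_get_bool and for the idempotence checks below.
 */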
static void mmu_audit_enable(void)
{
        int ret;

        if (mmu_audit)
                return;

        ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
        WARN_ON(ret);

        mmu_audit = true;
}

static void mmu_audit_disable(void)
{
        if (!mmu_audit)
                return;

        unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
        tracepoint_synchronize_unregister();
        mmu_audit = false;
}

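/*
 * Set handler for the mmu_audit module parameter; only "0" and "1" are
 * accepted.  With this file built into the kvm module, the knob should
 * appear as /sys/module/kvm/parameters/mmu_audit, e.g. (assuming that
 * path):
 *
 *        echo 1 > /sys/module/kvm/parameters/mmu_audit
 */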
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
        int ret;
        unsigned long enable;

        ret = strict_strtoul(val, 10, &enable);
        if (ret < 0)
                return -EINVAL;

        switch (enable) {
        case 0:
                mmu_audit_disable();
                break;
        case 1:
                mmu_audit_enable();
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct kernel_param_ops audit_param_ops = {
        .set = mmu_audit_set,
        .get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);