/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *	Yaniv Kamay <yaniv@qumranet.com>
 *	Avi Kivity <avi@qumranet.com>
 *	Marcelo Tosatti <mtosatti@redhat.com>
 *	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

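/*
 * Note: there are deliberately no #include directives here; this file is
 * compiled by being #included from mmu.c, so it can use the MMU's static
 * helpers (page_header(), is_shadow_present_pte(), ...) directly.
 */

/*
 * Name of the audit point currently being checked (set from
 * audit_point_name[] by kvm_mmu_audit()); it prefixes every report below.
 */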
static const char *audit_msg;

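/* Callback applied to shadow ptes by the walkers below. */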
typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);

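/*
 * Recursively walk the shadow page table rooted at @sp, applying @fn to
 * every present, last-level spte.
 */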
static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_last_spte(ent, sp->role.level)) {
				struct kvm_mmu_page *child;

				child = page_header(ent & PT64_BASE_ADDR_MASK);
				__mmu_spte_walk(kvm, child, fn);
			} else
				fn(kvm, &sp->spt[i]);
		}
	}
}

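/*
 * Walk from the active root(s): the single level-4 root if one is in use,
 * otherwise each valid PAE root.
 */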
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu->kvm, sp, fn);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu->kvm, sp, fn);
		}
	}
}

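/*
 * Check the sptes of one shadow page table against the guest: unsync and
 * direct pages must not hold notrap sptes, and every present, last-level
 * spte must point at the pfn the memslot currently holds for its gfn.
 * Each entry at @level spans 1ul << (PAGE_SHIFT + 9 * (level - 1)) bytes
 * of guest virtual address space, e.g. 2MB (1ul << 21) at level 2.
 */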
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 *sptep = pt + i;
		struct kvm_mmu_page *sp;
		gfn_t gfn;
		pfn_t pfn;
		hpa_t hpa;

		sp = page_header(__pa(sptep));

		if (sp->unsync) {
			if (level != PT_PAGE_TABLE_LEVEL) {
				printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
				       audit_msg, sp, level);
				return;
			}

			if (*sptep == shadow_notrap_nonpresent_pte) {
				printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
				       audit_msg, sp);
				return;
			}
		}

		if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
			printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
			       audit_msg, sp);
			return;
		}

		if (!is_shadow_present_pte(*sptep) ||
		      !is_last_spte(*sptep, level))
			return;

		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

		if (is_error_pfn(pfn)) {
			kvm_release_pfn_clean(pfn);
			return;
		}

		hpa = pfn << PAGE_SHIFT;

		if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
			printk(KERN_ERR "audit error: (%s) levels %d"
			       " gva %lx pfn %llx hpa %llx ent %llx\n",
			       audit_msg, vcpu->arch.mmu.root_level,
			       va, pfn, hpa, *sptep);
	}
}

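/*
 * Audit the whole guest-visible mapping: either the single level-4 root,
 * or each present PAE root, each of which covers 1GB (i << 30) of guest
 * virtual address space.
 */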
static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

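/*
 * Verify that @sptep is reachable through the reverse map of the gfn it
 * maps: the gfn must belong to a memslot, and that slot's rmap chain for
 * the page's level must be non-empty.
 */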
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	if (!gfn_to_memslot(kvm, gfn)) {
		if (!printk_ratelimit())
			return;
		printk(KERN_ERR "%s: no memslot for gfn %llx\n",
		       audit_msg, gfn);
		printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
		       audit_msg, (long int)(sptep - rev_sp->spt),
		       rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!printk_ratelimit())
			return;
		printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
		       audit_msg, *sptep);
		dump_stack();
	}
}

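/* Check that every spte reachable from the active roots has a reverse map. */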
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
}

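/*
 * The list-based variant: scan every active last-level shadow page and
 * check each rmap-tracked spte in it.
 */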
static void check_mappings_rmap(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (!is_rmap_spte(pt[i]))
				continue;

			inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
		}
	}
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	check_mappings_rmap(vcpu);
}

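/*
 * A synced, indirect, valid shadow page implies its guest page table is
 * write-protected: report any spte in the gfn's rmap chain that is still
 * writable.
 */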
static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.direct)
			continue;
		if (sp->unsync)
			continue;
		if (sp->role.invalid)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

		spte = rmap_next(vcpu->kvm, rmapp, NULL);
		while (spte) {
			if (is_writable_pte(*spte))
				printk(KERN_ERR "%s: (%s) shadow page has "
				       "writable mappings: gfn %llx role %x\n",
				       __func__, audit_msg, sp->gfn,
				       sp->role.word);
			spte = rmap_next(vcpu->kvm, rmapp, spte);
		}
	}
}

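/*
 * Tracepoint probe: run the full set of checks for one audit point.
 * audit_mappings() is skipped at "pre pte write", presumably because the
 * shadow table may legitimately lag behind a guest pte that has just been
 * written but not yet processed.
 */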
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
{
	audit_msg = audit_point_name[audit_point];
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	if (strcmp("pre pte write", audit_msg) != 0)
		audit_mappings(vcpu);
	audit_sptes_have_rmaps(vcpu);
}

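/* True while the audit probe is registered on the kvm_mmu_audit tracepoint. */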
static bool mmu_audit;

static void mmu_audit_enable(void)
{
	int ret;

	if (mmu_audit)
		return;

	ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
	WARN_ON(ret);

	mmu_audit = true;
}

static void mmu_audit_disable(void)
{
	if (!mmu_audit)
		return;

	unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
	tracepoint_synchronize_unregister();
	mmu_audit = false;
}

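/* Parameter setter: "0" disables auditing, "1" enables it, else -EINVAL. */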
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	unsigned long enable;

	ret = strict_strtoul(val, 10, &enable);
	if (ret < 0)
		return -EINVAL;

	switch (enable) {
	case 0:
		mmu_audit_disable();
		break;
	case 1:
		mmu_audit_enable();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
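
/*
 * Usage sketch (assuming this file is built into the kvm module, so the
 * parameter appears under /sys/module/kvm/parameters):
 *
 *	echo 1 > /sys/module/kvm/parameters/mmu_audit	(register the probe)
 *	echo 0 > /sys/module/kvm/parameters/mmu_audit	(unregister it)
 */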