/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay       <yaniv@qumranet.com>
 *   Avi Kivity        <avi@qumranet.com>
 *   Marcelo Tosatti   <mtosatti@redhat.com>
 *   Xiao Guangrong    <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/ratelimit.h>

static const char *audit_msg;

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

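/*
 * Recursively visit every shadow PTE reachable from @sp, calling @fn on
 * each entry before descending into non-leaf children.
 */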
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            inspect_spte_fn fn, int level)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                u64 *ent = sp->spt;

                fn(vcpu, ent + i, level);

                if (is_shadow_present_pte(ent[i]) &&
                    !is_last_spte(ent[i], level)) {
                        struct kvm_mmu_page *child;

                        child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
                        __mmu_spte_walk(vcpu, child, fn, level - 1);
                }
        }
}

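/*
 * Walk the current vcpu's shadow page tables from the root, handling both
 * the 64-bit single-root case and the four PAE roots.
 */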
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
        int i;
        struct kvm_mmu_page *sp;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;

        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;

                sp = page_header(root);
                __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
                return;
        }

        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        __mmu_spte_walk(vcpu, sp, fn, 2);
                }
        }

        return;
}

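/* sp_handler callbacks are applied to every shadow page on the VM's active list. */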
typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
                fn(kvm, sp);
}

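/*
 * Check a single spte: flag unsync/direct pages that still contain notrap
 * sptes, and verify that a present leaf spte points at the host physical
 * address backing the guest frame it maps.
 */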
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        pfn_t pfn;
        hpa_t hpa;

        sp = page_header(__pa(sptep));

        if (sp->unsync) {
                if (level != PT_PAGE_TABLE_LEVEL) {
                        printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
                               audit_msg, sp, level);
                        return;
                }

                if (*sptep == shadow_notrap_nonpresent_pte) {
                        printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
                               audit_msg, sp);
                        return;
                }
        }

        if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
                printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
                       audit_msg, sp);
                return;
        }

        if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
                return;

        gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
        pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

        if (is_error_pfn(pfn)) {
                kvm_release_pfn_clean(pfn);
                return;
        }

        hpa = pfn << PAGE_SHIFT;
        if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
                printk(KERN_ERR "xx audit error: (%s) levels %d "
                       "pfn %llx hpa %llx ent %llx\n",
                       audit_msg, vcpu->arch.mmu.root_level,
                       pfn, hpa, *sptep);
}

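/*
 * Make sure the gfn mapped by @sptep belongs to a memslot and that the spte
 * is reachable through that gfn's rmap chain.
 */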
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
        unsigned long *rmapp;
        struct kvm_mmu_page *rev_sp;
        gfn_t gfn;

        rev_sp = page_header(__pa(sptep));
        gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

        if (!gfn_to_memslot(kvm, gfn)) {
                if (!printk_ratelimit())
                        return;
                printk(KERN_ERR "%s: no memslot for gfn %llx\n",
                       audit_msg, gfn);
                printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
                       audit_msg, (long int)(sptep - rev_sp->spt),
                       rev_sp->gfn);
                dump_stack();
                return;
        }

        rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
        if (!*rmapp) {
                if (!printk_ratelimit())
                        return;
                printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
                       audit_msg, *sptep);
                dump_stack();
        }
}

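/* A present leaf spte must be present in the rmap. */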
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
                inspect_spte_has_rmap(vcpu->kvm, sptep);
}

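/* For last-level shadow pages, check every rmap-tracked spte they contain. */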
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        int i;

        if (sp->role.level != PT_PAGE_TABLE_LEVEL)
                return;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (!is_rmap_spte(sp->spt[i]))
                        continue;

                inspect_spte_has_rmap(kvm, sp->spt + i);
        }
}

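/*
 * A shadowed guest page table (not direct, not unsync, not invalid) must be
 * write protected; warn about any writable spte found in its rmap chain.
 */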
void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        u64 *spte;

        if (sp->role.direct || sp->unsync || sp->role.invalid)
                return;

        slot = gfn_to_memslot(kvm, sp->gfn);
        rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                if (is_writable_pte(*spte))
                        printk(KERN_ERR "%s: (%s) shadow page has "
                               "writable mappings: gfn %llx role %x\n",
                               __func__, audit_msg, sp->gfn,
                               sp->role.word);
                spte = rmap_next(kvm, rmapp, spte);
        }
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        check_mappings_rmap(kvm, sp);
        audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
        walk_all_active_sps(kvm, audit_sp);
}

static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
        audit_sptes_have_rmaps(vcpu, sptep, level);
        audit_mappings(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
        mmu_spte_walk(vcpu, audit_spte);
}

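/*
 * Tracepoint probe: rate limited to a burst of 10 audits per 5 seconds, it
 * audits all active shadow pages and then the current vcpu's spte tree.
 */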
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!__ratelimit(&ratelimit_state))
                return;

        audit_msg = audit_point_name[audit_point];
        audit_all_active_sps(vcpu->kvm);
        audit_vcpu_spte(vcpu);
}

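/*
 * Auditing is enabled by attaching kvm_mmu_audit() to the kvm_mmu_audit
 * tracepoint and disabled by detaching it again.
 */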
static bool mmu_audit;

static void mmu_audit_enable(void)
{
        int ret;

        if (mmu_audit)
                return;

        ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
        WARN_ON(ret);

        mmu_audit = true;
}

static void mmu_audit_disable(void)
{
        if (!mmu_audit)
                return;

        unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
        tracepoint_synchronize_unregister();
        mmu_audit = false;
}

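/* Module parameter setter: "0" disables auditing, "1" enables it. */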
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
        int ret;
        unsigned long enable;

        ret = strict_strtoul(val, 10, &enable);
        if (ret < 0)
                return -EINVAL;

        switch (enable) {
        case 0:
                mmu_audit_disable();
                break;
        case 1:
                mmu_audit_enable();
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

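/*
 * The parameter below lets auditing be toggled at run time without
 * reloading the module; assuming the standard sysfs layout for module
 * parameters, something like the following turns it on:
 *
 *   echo 1 > /sys/module/kvm/parameters/mmu_audit
 */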
static struct kernel_param_ops audit_param_ops = {
        .set = mmu_audit_set,
        .get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);