/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay   <yaniv@qumranet.com>
 *   Avi Kivity    <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/ratelimit.h>

static int audit_point;
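
/*
 * Audit messages are tagged with the audit point (audit_point_name[])
 * that was being checked when the inconsistency was found.
 */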
#define audit_printk(fmt, args...)		\
	printk(KERN_ERR "audit: (%s) error: "	\
	       fmt, audit_point_name[audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
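
/*
 * Recursively visit every spte under @sp: call @fn on each entry and
 * descend into the child shadow page of every present, non-leaf spte.
 */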
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn, int level)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 *ent = sp->spt;

		fn(vcpu, ent + i, level);

		if (is_shadow_present_pte(ent[i]) &&
		    !is_last_spte(ent[i], level)) {
			struct kvm_mmu_page *child;

			child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
			__mmu_spte_walk(vcpu, child, fn, level - 1);
		}
	}
}
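
/*
 * Walk the whole shadow page table of @vcpu.  A 64-bit root is walked from
 * root_hpa; otherwise the four PAE root entries are walked as level-2
 * tables.
 */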
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu, sp, fn, 2);
		}
	}
}
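
/* Run a handler on every shadow page on kvm->arch.active_mmu_pages. */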
typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
	struct kvm_mmu_page *sp;
	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
		fn(kvm, sp);
}
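
/*
 * Check that a present leaf spte points at the host page that currently
 * backs its gfn, and flag notrap sptes that must not appear in unsync or
 * direct shadow pages.
 */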
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	pfn_t pfn;
	hpa_t hpa;

	sp = page_header(__pa(sptep));

	if (sp->unsync) {
		if (level != PT_PAGE_TABLE_LEVEL) {
			audit_printk("unsync sp: %p level = %d\n", sp, level);
			return;
		}
		if (*sptep == shadow_notrap_nonpresent_pte) {
			audit_printk("notrap spte in unsync sp: %p\n", sp);
			return;
		}
	}

	if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
		audit_printk("notrap spte in direct sp: %p\n", sp);
		return;
	}

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}

	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		audit_printk("levels %d pfn %llx hpa %llx ent %llx\n",
			     vcpu->arch.mmu.root_level, pfn, hpa, *sptep);
}
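
/*
 * Verify that the gfn mapped by a spte has both a memslot and a reverse
 * mapping entry; complain (rate-limited) if either is missing.
 */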
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	if (!gfn_to_memslot(kvm, gfn)) {
		if (!printk_ratelimit())
			return;
		audit_printk("no memslot for gfn %llx\n", gfn);
		audit_printk("index %ld of sp (gfn=%llx)\n",
			     (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!printk_ratelimit())
			return;
		audit_printk("no rmap for writable spte %llx\n", *sptep);
		dump_stack();
	}
}

static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);
}
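
/*
 * For a last-level shadow page, make sure every spte that belongs in a
 * rmap chain (is_rmap_spte()) is checked against the rmap.
 */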
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int i;

	if (sp->role.level != PT_PAGE_TABLE_LEVEL)
		return;
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (!is_rmap_spte(sp->spt[i]))
			continue;
		inspect_spte_has_rmap(kvm, sp->spt + i);
	}
}
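
/*
 * A synced, non-direct shadow page must be write-protected: no spte in
 * the rmap chain of its gfn may be writable.
 */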
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slot = gfn_to_memslot(kvm, sp->gfn);
	rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		if (is_writable_pte(*spte))
			audit_printk("shadow page has writable mappings: gfn "
				     "%llx role %x\n", sp->gfn, sp->role.word);
		spte = rmap_next(kvm, rmapp, spte);
	}
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_sp);
}

static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	audit_sptes_have_rmaps(vcpu, sptep, level);
	audit_mappings(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, audit_spte);
}
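
/*
 * Tracepoint probe: audit all active shadow pages and the vcpu's sptes,
 * rate-limited to at most 10 runs every 5 seconds.
 */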
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	audit_point = point;
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}

static bool mmu_audit;

static void mmu_audit_enable(void)
{
	int ret;

	if (mmu_audit)
		return;

	ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
	WARN_ON(ret);

	mmu_audit = true;
}

static void mmu_audit_disable(void)
{
	if (!mmu_audit)
		return;

	unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
	tracepoint_synchronize_unregister();
	mmu_audit = false;
}
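
/*
 * "mmu_audit" module parameter: writing 1 registers the audit probe on
 * the kvm_mmu_audit tracepoint, writing 0 removes it.
 */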
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	unsigned long enable;

	ret = strict_strtoul(val, 10, &enable);
	if (ret < 0)
		return -EINVAL;

	switch (enable) {
	case 0:
		mmu_audit_disable();
		break;
	case 1:
		mmu_audit_enable();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);