2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
9 * Copyright (C) 2006 Qumranet, Inc.
10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13 * Yaniv Kamay <yaniv@qumranet.com>
14 * Avi Kivity <avi@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
24 #include "kvm_cache_regs.h"
27 #include <linux/kvm_host.h>
28 #include <linux/types.h>
29 #include <linux/string.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/swap.h>
34 #include <linux/hugetlb.h>
35 #include <linux/compiler.h>
36 #include <linux/srcu.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
41 #include <asm/cmpxchg.h>
46 * When set to true, this variable enables Two-Dimensional Paging, in
47 * which the hardware walks 2 page tables:
48 * 1. the guest-virtual to guest-physical translation
49 * 2. while doing 1., the guest-physical to host-physical translation
50 * If the hardware supports this, we don't need to do shadow paging.
52 bool tdp_enabled = false;
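/*
 * Illustrative sketch (editor's addition, not in the original source):
 * with TDP the hardware itself performs the two translations below on
 * every guest access, while with shadow paging KVM folds both stages
 * into one software-maintained shadow page table.  gpa_of() and
 * hpa_of() are hypothetical helpers used only to show the two stages.
 */
#if 0	/* example only, never compiled */
static hpa_t tdp_translation_example(gva_t gva)
{
	gpa_t gpa = gpa_of(gva);	/* 1. guest page tables: guest-virtual -> guest-physical */
	return hpa_of(gpa);		/* 2. EPT/NPT tables: guest-physical -> host-physical    */
}
#endif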
56 AUDIT_POST_PAGE_FAULT,
63 char *audit_point_name[] = {
76 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
77 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
81 #define pgprintk(x...) do { } while (0)
82 #define rmap_printk(x...) do { } while (0)
88 module_param(dbg, bool, 0644);
91 static bool oos_shadow = true;
92 module_param(oos_shadow, bool, 0644);
95 #define ASSERT(x) do { } while (0)
99 printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
100 __FILE__, __LINE__, #x); \
104 #define PTE_PREFETCH_NUM 8
106 #define PT_FIRST_AVAIL_BITS_SHIFT 9
107 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
109 #define PT64_LEVEL_BITS 9
111 #define PT64_LEVEL_SHIFT(level) \
112 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
114 #define PT64_INDEX(address, level)\
115 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
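/*
 * Worked example (editor's note, assuming 4KB pages, PAGE_SHIFT == 12):
 *   PT64_LEVEL_SHIFT(1) == 12, PT64_LEVEL_SHIFT(2) == 21,
 *   PT64_LEVEL_SHIFT(3) == 30, PT64_LEVEL_SHIFT(4) == 39,
 * so PT64_INDEX(addr, level) extracts the 9-bit table index used at
 * each of the four levels of a 64-bit shadow page table walk.
 */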
118 #define PT32_LEVEL_BITS 10
120 #define PT32_LEVEL_SHIFT(level) \
121 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
123 #define PT32_LVL_OFFSET_MASK(level) \
124 (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
125 * PT32_LEVEL_BITS))) - 1))
127 #define PT32_INDEX(address, level)\
128 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
131 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
132 #define PT64_DIR_BASE_ADDR_MASK \
133 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
134 #define PT64_LVL_ADDR_MASK(level) \
135 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
136 * PT64_LEVEL_BITS))) - 1))
137 #define PT64_LVL_OFFSET_MASK(level) \
138 (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
139 * PT64_LEVEL_BITS))) - 1))
141 #define PT32_BASE_ADDR_MASK PAGE_MASK
142 #define PT32_DIR_BASE_ADDR_MASK \
143 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
144 #define PT32_LVL_ADDR_MASK(level) \
145 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
146 * PT32_LEVEL_BITS))) - 1))
148 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
153 #define ACC_EXEC_MASK 1
154 #define ACC_WRITE_MASK PT_WRITABLE_MASK
155 #define ACC_USER_MASK PT_USER_MASK
156 #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
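/*
 * Worked example (editor's note): with the usual x86 PTE bit layout
 * (PT_WRITABLE_MASK == 1 << 1, PT_USER_MASK == 1 << 2), the masks above
 * evaluate to ACC_EXEC_MASK == 0x1, ACC_WRITE_MASK == 0x2,
 * ACC_USER_MASK == 0x4 and therefore ACC_ALL == 0x7.
 */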
158 #include <trace/events/kvm.h>
160 #define CREATE_TRACE_POINTS
161 #include "mmutrace.h"
163 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
165 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
167 struct kvm_rmap_desc {
168 u64 *sptes[RMAP_EXT];
169 struct kvm_rmap_desc *more;
172 struct kvm_shadow_walk_iterator {
180 #define for_each_shadow_entry(_vcpu, _addr, _walker) \
181 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
182 shadow_walk_okay(&(_walker)); \
183 shadow_walk_next(&(_walker)))
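/*
 * Usage sketch (editor's addition): walk the shadow page table for a
 * guest address and look at each spte on the path.  example_walk() is
 * a hypothetical function shown only to illustrate the iterator.
 */
#if 0	/* example only, never compiled */
static void example_walk(struct kvm_vcpu *vcpu, u64 addr)
{
	struct kvm_shadow_walk_iterator it;

	for_each_shadow_entry(vcpu, addr, it) {
		/* it.sptep points at the spte for level it.level */
		pgprintk("level %d spte %llx\n", it.level, *it.sptep);
	}
}
#endif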
185 typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
187 static struct kmem_cache *pte_chain_cache;
188 static struct kmem_cache *rmap_desc_cache;
189 static struct kmem_cache *mmu_page_header_cache;
190 static struct percpu_counter kvm_total_used_mmu_pages;
192 static u64 __read_mostly shadow_trap_nonpresent_pte;
193 static u64 __read_mostly shadow_notrap_nonpresent_pte;
194 static u64 __read_mostly shadow_nx_mask;
195 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
196 static u64 __read_mostly shadow_user_mask;
197 static u64 __read_mostly shadow_accessed_mask;
198 static u64 __read_mostly shadow_dirty_mask;
200 static inline u64 rsvd_bits(int s, int e)
202 return ((1ULL << (e - s + 1)) - 1) << s;
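/*
 * Worked example (editor's note): rsvd_bits(52, 62) returns
 * ((1ULL << 11) - 1) << 52, i.e. a mask with bits 52..62 set, which is
 * how reserved-bit masks for guest page table checks are built.
 */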
205 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
207 shadow_trap_nonpresent_pte = trap_pte;
208 shadow_notrap_nonpresent_pte = notrap_pte;
210 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
212 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
213 u64 dirty_mask, u64 nx_mask, u64 x_mask)
215 shadow_user_mask = user_mask;
216 shadow_accessed_mask = accessed_mask;
217 shadow_dirty_mask = dirty_mask;
218 shadow_nx_mask = nx_mask;
219 shadow_x_mask = x_mask;
221 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
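/*
 * Illustrative call (editor's addition): the hardware backend supplies
 * its own bit layout here.  The mask values below are placeholders for
 * the example; the real VMX/SVM setup code passes the bits its paging
 * hardware actually uses.
 */
#if 0	/* example only, never compiled */
	kvm_mmu_set_mask_ptes(1ull << 2,	/* user     */
			      1ull << 5,	/* accessed */
			      1ull << 6,	/* dirty    */
			      1ull << 63,	/* nx       */
			      0);		/* x        */
#endif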
223 static bool is_write_protection(struct kvm_vcpu *vcpu)
225 return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
228 static int is_cpuid_PSE36(void)
233 static int is_nx(struct kvm_vcpu *vcpu)
235 return vcpu->arch.efer & EFER_NX;
238 static int is_shadow_present_pte(u64 pte)
240 return pte != shadow_trap_nonpresent_pte
241 && pte != shadow_notrap_nonpresent_pte;
244 static int is_large_pte(u64 pte)
246 return pte & PT_PAGE_SIZE_MASK;
249 static int is_writable_pte(unsigned long pte)
251 return pte & PT_WRITABLE_MASK;
254 static int is_dirty_gpte(unsigned long pte)
256 return pte & PT_DIRTY_MASK;
259 static int is_rmap_spte(u64 pte)
261 return is_shadow_present_pte(pte);
264 static int is_last_spte(u64 pte, int level)
266 if (level == PT_PAGE_TABLE_LEVEL)
268 if (is_large_pte(pte))
273 static pfn_t spte_to_pfn(u64 pte)
275 return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
278 static gfn_t pse36_gfn_delta(u32 gpte)
280 int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
282 return (gpte & PT32_DIR_PSE36_MASK) << shift;
285 static void __set_spte(u64 *sptep, u64 spte)
287 set_64bit(sptep, spte);
290 static u64 __xchg_spte(u64 *sptep, u64 new_spte)
293 return xchg(sptep, new_spte);
299 } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
305 static bool spte_has_volatile_bits(u64 spte)
307 if (!shadow_accessed_mask)
310 if (!is_shadow_present_pte(spte))
313 if ((spte & shadow_accessed_mask) &&
314 (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
320 static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
322 return (old_spte & bit_mask) && !(new_spte & bit_mask);
325 static void update_spte(u64 *sptep, u64 new_spte)
327 u64 mask, old_spte = *sptep;
329 WARN_ON(!is_rmap_spte(new_spte));
331 new_spte |= old_spte & shadow_dirty_mask;
333 mask = shadow_accessed_mask;
334 if (is_writable_pte(old_spte))
335 mask |= shadow_dirty_mask;
337 if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
338 __set_spte(sptep, new_spte);
340 old_spte = __xchg_spte(sptep, new_spte);
342 if (!shadow_accessed_mask)
345 if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
346 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
347 if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
348 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
351 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
352 struct kmem_cache *base_cache, int min)
356 if (cache->nobjs >= min)
358 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
359 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
362 cache->objects[cache->nobjs++] = obj;
367 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
368 struct kmem_cache *cache)
371 kmem_cache_free(cache, mc->objects[--mc->nobjs]);
374 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
379 if (cache->nobjs >= min)
381 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
382 page = (void *)__get_free_page(GFP_KERNEL);
385 cache->objects[cache->nobjs++] = page;
390 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
393 free_page((unsigned long)mc->objects[--mc->nobjs]);
396 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
400 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
404 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
405 rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
408 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
411 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
412 mmu_page_header_cache, 4);
417 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
419 mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
420 mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
421 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
422 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
423 mmu_page_header_cache);
426 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
432 p = mc->objects[--mc->nobjs];
436 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
438 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
439 sizeof(struct kvm_pte_chain));
442 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
444 kmem_cache_free(pte_chain_cache, pc);
447 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
449 return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
450 sizeof(struct kvm_rmap_desc));
453 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
455 kmem_cache_free(rmap_desc_cache, rd);
458 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
460 if (!sp->role.direct)
461 return sp->gfns[index];
463 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
466 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
469 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
471 sp->gfns[index] = gfn;
475 * Return the pointer to the large page information for a given gfn,
476 * handling slots that are not large page aligned.
478 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
479 struct kvm_memory_slot *slot,
484 idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
485 (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
486 return &slot->lpage_info[level - 2][idx];
489 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
491 struct kvm_memory_slot *slot;
492 struct kvm_lpage_info *linfo;
495 slot = gfn_to_memslot(kvm, gfn);
496 for (i = PT_DIRECTORY_LEVEL;
497 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
498 linfo = lpage_info_slot(gfn, slot, i);
499 linfo->write_count += 1;
503 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
505 struct kvm_memory_slot *slot;
506 struct kvm_lpage_info *linfo;
509 slot = gfn_to_memslot(kvm, gfn);
510 for (i = PT_DIRECTORY_LEVEL;
511 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
512 linfo = lpage_info_slot(gfn, slot, i);
513 linfo->write_count -= 1;
514 WARN_ON(linfo->write_count < 0);
518 static int has_wrprotected_page(struct kvm *kvm,
522 struct kvm_memory_slot *slot;
523 struct kvm_lpage_info *linfo;
525 slot = gfn_to_memslot(kvm, gfn);
527 linfo = lpage_info_slot(gfn, slot, level);
528 return linfo->write_count;
534 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
536 unsigned long page_size;
539 page_size = kvm_host_page_size(kvm, gfn);
541 for (i = PT_PAGE_TABLE_LEVEL;
542 i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
543 if (page_size >= KVM_HPAGE_SIZE(i))
552 static struct kvm_memory_slot *
553 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
556 struct kvm_memory_slot *slot;
558 slot = gfn_to_memslot(vcpu->kvm, gfn);
559 if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
560 (no_dirty_log && slot->dirty_bitmap))
566 static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
568 return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
571 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
573 int host_level, level, max_level;
575 host_level = host_mapping_level(vcpu->kvm, large_gfn);
577 if (host_level == PT_PAGE_TABLE_LEVEL)
580 max_level = kvm_x86_ops->get_lpage_level() < host_level ?
581 kvm_x86_ops->get_lpage_level() : host_level;
583 for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
584 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
591 * Take a gfn and return the reverse mapping to it.
594 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
596 struct kvm_memory_slot *slot;
597 struct kvm_lpage_info *linfo;
599 slot = gfn_to_memslot(kvm, gfn);
600 if (likely(level == PT_PAGE_TABLE_LEVEL))
601 return &slot->rmap[gfn - slot->base_gfn];
603 linfo = lpage_info_slot(gfn, slot, level);
605 return &linfo->rmap_pde;
609 * Reverse mapping data structures:
611 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
612 * that points to page_address(page).
614 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
615 * containing more mappings.
617 * Returns the number of rmap entries before the spte was added or zero if
618 * the spte was not added.
621 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
623 struct kvm_mmu_page *sp;
624 struct kvm_rmap_desc *desc;
625 unsigned long *rmapp;
628 if (!is_rmap_spte(*spte))
630 sp = page_header(__pa(spte));
631 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
632 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
634 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
635 *rmapp = (unsigned long)spte;
636 } else if (!(*rmapp & 1)) {
637 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
638 desc = mmu_alloc_rmap_desc(vcpu);
639 desc->sptes[0] = (u64 *)*rmapp;
640 desc->sptes[1] = spte;
641 *rmapp = (unsigned long)desc | 1;
644 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
645 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
646 while (desc->sptes[RMAP_EXT-1] && desc->more) {
650 if (desc->sptes[RMAP_EXT-1]) {
651 desc->more = mmu_alloc_rmap_desc(vcpu);
654 for (i = 0; desc->sptes[i]; ++i)
656 desc->sptes[i] = spte;
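/*
 * Illustrative sketch (editor's addition): how the tagged rmapp pointer
 * described above rmap_add() is decoded.  rmap_desc_of() is a
 * hypothetical helper used only for this example.
 */
#if 0	/* example only, never compiled */
static struct kvm_rmap_desc *rmap_desc_of(unsigned long rmapp)
{
	if (!(rmapp & 1))
		return NULL;	/* rmapp itself is the single spte pointer */
	return (struct kvm_rmap_desc *)(rmapp & ~1ul);
}
#endif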
661 static void rmap_desc_remove_entry(unsigned long *rmapp,
662 struct kvm_rmap_desc *desc,
664 struct kvm_rmap_desc *prev_desc)
668 for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
670 desc->sptes[i] = desc->sptes[j];
671 desc->sptes[j] = NULL;
674 if (!prev_desc && !desc->more)
675 *rmapp = (unsigned long)desc->sptes[0];
678 prev_desc->more = desc->more;
680 *rmapp = (unsigned long)desc->more | 1;
681 mmu_free_rmap_desc(desc);
684 static void rmap_remove(struct kvm *kvm, u64 *spte)
686 struct kvm_rmap_desc *desc;
687 struct kvm_rmap_desc *prev_desc;
688 struct kvm_mmu_page *sp;
690 unsigned long *rmapp;
693 sp = page_header(__pa(spte));
694 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
695 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
697 printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
699 } else if (!(*rmapp & 1)) {
700 rmap_printk("rmap_remove: %p 1->0\n", spte);
701 if ((u64 *)*rmapp != spte) {
702 printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte);
707 rmap_printk("rmap_remove: %p many->many\n", spte);
708 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
711 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
712 if (desc->sptes[i] == spte) {
713 rmap_desc_remove_entry(rmapp,
721 pr_err("rmap_remove: %p many->many\n", spte);
726 static int set_spte_track_bits(u64 *sptep, u64 new_spte)
729 u64 old_spte = *sptep;
731 if (!spte_has_volatile_bits(old_spte))
732 __set_spte(sptep, new_spte);
734 old_spte = __xchg_spte(sptep, new_spte);
736 if (!is_rmap_spte(old_spte))
739 pfn = spte_to_pfn(old_spte);
740 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
741 kvm_set_pfn_accessed(pfn);
742 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
743 kvm_set_pfn_dirty(pfn);
747 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
749 if (set_spte_track_bits(sptep, new_spte))
750 rmap_remove(kvm, sptep);
753 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
755 struct kvm_rmap_desc *desc;
761 else if (!(*rmapp & 1)) {
763 return (u64 *)*rmapp;
766 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
769 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
770 if (prev_spte == spte)
771 return desc->sptes[i];
772 prev_spte = desc->sptes[i];
779 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
781 unsigned long *rmapp;
783 int i, write_protected = 0;
785 rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
787 spte = rmap_next(kvm, rmapp, NULL);
790 BUG_ON(!(*spte & PT_PRESENT_MASK));
791 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
792 if (is_writable_pte(*spte)) {
793 update_spte(spte, *spte & ~PT_WRITABLE_MASK);
796 spte = rmap_next(kvm, rmapp, spte);
799 /* check for huge page mappings */
800 for (i = PT_DIRECTORY_LEVEL;
801 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
802 rmapp = gfn_to_rmap(kvm, gfn, i);
803 spte = rmap_next(kvm, rmapp, NULL);
806 BUG_ON(!(*spte & PT_PRESENT_MASK));
807 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
808 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
809 if (is_writable_pte(*spte)) {
811 shadow_trap_nonpresent_pte);
816 spte = rmap_next(kvm, rmapp, spte);
820 return write_protected;
823 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
827 int need_tlb_flush = 0;
829 while ((spte = rmap_next(kvm, rmapp, NULL))) {
830 BUG_ON(!(*spte & PT_PRESENT_MASK));
831 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
832 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
835 return need_tlb_flush;
838 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
843 pte_t *ptep = (pte_t *)data;
846 WARN_ON(pte_huge(*ptep));
847 new_pfn = pte_pfn(*ptep);
848 spte = rmap_next(kvm, rmapp, NULL);
850 BUG_ON(!is_shadow_present_pte(*spte));
851 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
853 if (pte_write(*ptep)) {
854 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
855 spte = rmap_next(kvm, rmapp, NULL);
857 new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
858 new_spte |= (u64)new_pfn << PAGE_SHIFT;
860 new_spte &= ~PT_WRITABLE_MASK;
861 new_spte &= ~SPTE_HOST_WRITEABLE;
862 new_spte &= ~shadow_accessed_mask;
863 set_spte_track_bits(spte, new_spte);
864 spte = rmap_next(kvm, rmapp, spte);
868 kvm_flush_remote_tlbs(kvm);
873 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
875 int (*handler)(struct kvm *kvm, unsigned long *rmapp,
881 struct kvm_memslots *slots;
883 slots = kvm_memslots(kvm);
885 for (i = 0; i < slots->nmemslots; i++) {
886 struct kvm_memory_slot *memslot = &slots->memslots[i];
887 unsigned long start = memslot->userspace_addr;
890 end = start + (memslot->npages << PAGE_SHIFT);
891 if (hva >= start && hva < end) {
892 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
893 gfn_t gfn = memslot->base_gfn + gfn_offset;
895 ret = handler(kvm, &memslot->rmap[gfn_offset], data);
897 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
898 struct kvm_lpage_info *linfo;
900 linfo = lpage_info_slot(gfn, memslot,
901 PT_DIRECTORY_LEVEL + j);
902 ret |= handler(kvm, &linfo->rmap_pde, data);
904 trace_kvm_age_page(hva, memslot, ret);
912 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
914 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
917 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
919 kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
922 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
929 * Emulate the accessed bit for EPT, by checking if this page has
930 * an EPT mapping, and clearing it if it does. On the next access,
931 * a new EPT mapping will be established.
932 * This has some overhead, but not as much as the cost of swapping
933 * out actively used pages or breaking up actively used hugepages.
935 if (!shadow_accessed_mask)
936 return kvm_unmap_rmapp(kvm, rmapp, data);
938 spte = rmap_next(kvm, rmapp, NULL);
942 BUG_ON(!(_spte & PT_PRESENT_MASK));
943 _young = _spte & PT_ACCESSED_MASK;
946 clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
948 spte = rmap_next(kvm, rmapp, spte);
953 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
960 * If the hardware provides no accessed bit in the secondary pte,
961 * it's up to gup-fast/gup to set the accessed bit in
962 * the primary pte or in the page structure.
964 if (!shadow_accessed_mask)
967 spte = rmap_next(kvm, rmapp, NULL);
970 BUG_ON(!(_spte & PT_PRESENT_MASK));
971 young = _spte & PT_ACCESSED_MASK;
976 spte = rmap_next(kvm, rmapp, spte);
982 #define RMAP_RECYCLE_THRESHOLD 1000
984 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
986 unsigned long *rmapp;
987 struct kvm_mmu_page *sp;
989 sp = page_header(__pa(spte));
991 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
993 kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
994 kvm_flush_remote_tlbs(vcpu->kvm);
997 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
999 return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
1002 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1004 return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1008 static int is_empty_shadow_page(u64 *spt)
1013 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1014 if (is_shadow_present_pte(*pos)) {
1015 printk(KERN_ERR "%s: %p %llx\n", __func__,
1024 * This value is the sum of all of the kvm instances'
1025 * kvm->arch.n_used_mmu_pages values. We need a global,
1026 * aggregate version in order to make the slab shrinker
1029 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
1031 kvm->arch.n_used_mmu_pages += nr;
1032 percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1035 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1037 ASSERT(is_empty_shadow_page(sp->spt));
1038 hlist_del(&sp->hash_link);
1039 list_del(&sp->link);
1040 free_page((unsigned long)sp->spt);
1041 if (!sp->role.direct)
1042 free_page((unsigned long)sp->gfns);
1043 kmem_cache_free(mmu_page_header_cache, sp);
1044 kvm_mod_used_mmu_pages(kvm, -1);
1047 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1049 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
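/*
 * Worked example (editor's note): assuming KVM_MMU_HASH_SHIFT == 10,
 * this keeps the low 10 bits of the gfn and selects one of 1024 buckets
 * in kvm->arch.mmu_page_hash; the for_each_gfn_sp() helpers below walk
 * the selected bucket.
 */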
1052 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
1053 u64 *parent_pte, int direct)
1055 struct kvm_mmu_page *sp;
1057 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
1058 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
1060 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
1062 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1063 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1064 bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
1065 sp->multimapped = 0;
1066 sp->parent_pte = parent_pte;
1067 kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1071 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1072 struct kvm_mmu_page *sp, u64 *parent_pte)
1074 struct kvm_pte_chain *pte_chain;
1075 struct hlist_node *node;
1080 if (!sp->multimapped) {
1081 u64 *old = sp->parent_pte;
1084 sp->parent_pte = parent_pte;
1087 sp->multimapped = 1;
1088 pte_chain = mmu_alloc_pte_chain(vcpu);
1089 INIT_HLIST_HEAD(&sp->parent_ptes);
1090 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1091 pte_chain->parent_ptes[0] = old;
1093 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
1094 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
1096 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
1097 if (!pte_chain->parent_ptes[i]) {
1098 pte_chain->parent_ptes[i] = parent_pte;
1102 pte_chain = mmu_alloc_pte_chain(vcpu);
1104 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
1105 pte_chain->parent_ptes[0] = parent_pte;
1108 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1111 struct kvm_pte_chain *pte_chain;
1112 struct hlist_node *node;
1115 if (!sp->multimapped) {
1116 BUG_ON(sp->parent_pte != parent_pte);
1117 sp->parent_pte = NULL;
1120 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1121 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1122 if (!pte_chain->parent_ptes[i])
1124 if (pte_chain->parent_ptes[i] != parent_pte)
1126 while (i + 1 < NR_PTE_CHAIN_ENTRIES
1127 && pte_chain->parent_ptes[i + 1]) {
1128 pte_chain->parent_ptes[i]
1129 = pte_chain->parent_ptes[i + 1];
1132 pte_chain->parent_ptes[i] = NULL;
1134 hlist_del(&pte_chain->link);
1135 mmu_free_pte_chain(pte_chain);
1136 if (hlist_empty(&sp->parent_ptes)) {
1137 sp->multimapped = 0;
1138 sp->parent_pte = NULL;
1146 static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
1148 struct kvm_pte_chain *pte_chain;
1149 struct hlist_node *node;
1150 struct kvm_mmu_page *parent_sp;
1153 if (!sp->multimapped && sp->parent_pte) {
1154 parent_sp = page_header(__pa(sp->parent_pte));
1155 fn(parent_sp, sp->parent_pte);
1159 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1160 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1161 u64 *spte = pte_chain->parent_ptes[i];
1165 parent_sp = page_header(__pa(spte));
1166 fn(parent_sp, spte);
1170 static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
1171 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1173 mmu_parent_walk(sp, mark_unsync);
1176 static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
1180 index = spte - sp->spt;
1181 if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1183 if (sp->unsync_children++)
1185 kvm_mmu_mark_parents_unsync(sp);
1188 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1189 struct kvm_mmu_page *sp)
1193 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1194 sp->spt[i] = shadow_trap_nonpresent_pte;
1197 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1198 struct kvm_mmu_page *sp)
1203 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1207 #define KVM_PAGE_ARRAY_NR 16
1209 struct kvm_mmu_pages {
1210 struct mmu_page_and_offset {
1211 struct kvm_mmu_page *sp;
1213 } page[KVM_PAGE_ARRAY_NR];
1217 #define for_each_unsync_children(bitmap, idx) \
1218 for (idx = find_first_bit(bitmap, 512); \
1220 idx = find_next_bit(bitmap, 512, idx+1))
1222 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1228 for (i=0; i < pvec->nr; i++)
1229 if (pvec->page[i].sp == sp)
1232 pvec->page[pvec->nr].sp = sp;
1233 pvec->page[pvec->nr].idx = idx;
1235 return (pvec->nr == KVM_PAGE_ARRAY_NR);
1238 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1239 struct kvm_mmu_pages *pvec)
1241 int i, ret, nr_unsync_leaf = 0;
1243 for_each_unsync_children(sp->unsync_child_bitmap, i) {
1244 struct kvm_mmu_page *child;
1245 u64 ent = sp->spt[i];
1247 if (!is_shadow_present_pte(ent) || is_large_pte(ent))
1248 goto clear_child_bitmap;
1250 child = page_header(ent & PT64_BASE_ADDR_MASK);
1252 if (child->unsync_children) {
1253 if (mmu_pages_add(pvec, child, i))
1256 ret = __mmu_unsync_walk(child, pvec);
1258 goto clear_child_bitmap;
1260 nr_unsync_leaf += ret;
1263 } else if (child->unsync) {
1265 if (mmu_pages_add(pvec, child, i))
1268 goto clear_child_bitmap;
1273 __clear_bit(i, sp->unsync_child_bitmap);
1274 sp->unsync_children--;
1275 WARN_ON((int)sp->unsync_children < 0);
1279 return nr_unsync_leaf;
1282 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1283 struct kvm_mmu_pages *pvec)
1285 if (!sp->unsync_children)
1288 mmu_pages_add(pvec, sp, 0);
1289 return __mmu_unsync_walk(sp, pvec);
1292 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1294 WARN_ON(!sp->unsync);
1295 trace_kvm_mmu_sync_page(sp);
1297 --kvm->stat.mmu_unsync;
1300 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1301 struct list_head *invalid_list);
1302 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1303 struct list_head *invalid_list);
1305 #define for_each_gfn_sp(kvm, sp, gfn, pos) \
1306 hlist_for_each_entry(sp, pos, \
1307 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
1308 if ((sp)->gfn != (gfn)) {} else
1310 #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
1311 hlist_for_each_entry(sp, pos, \
1312 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
1313 if ((sp)->gfn != (gfn) || (sp)->role.direct || \
1314 (sp)->role.invalid) {} else
1316 /* @sp->gfn should be write-protected at the call site */
1317 static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1318 struct list_head *invalid_list, bool clear_unsync)
1320 if (sp->role.cr4_pae != !!is_pae(vcpu)) {
1321 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1326 kvm_unlink_unsync_page(vcpu->kvm, sp);
1328 if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1329 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
1333 kvm_mmu_flush_tlb(vcpu);
1337 static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
1338 struct kvm_mmu_page *sp)
1340 LIST_HEAD(invalid_list);
1343 ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
1345 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1350 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1351 struct list_head *invalid_list)
1353 return __kvm_sync_page(vcpu, sp, invalid_list, true);
1356 /* @gfn should be write-protected at the call site */
1357 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
1359 struct kvm_mmu_page *s;
1360 struct hlist_node *node;
1361 LIST_HEAD(invalid_list);
1364 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1368 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1369 kvm_unlink_unsync_page(vcpu->kvm, s);
1370 if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
1371 (vcpu->arch.mmu.sync_page(vcpu, s))) {
1372 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
1378 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1380 kvm_mmu_flush_tlb(vcpu);
1383 struct mmu_page_path {
1384 struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1385 unsigned int idx[PT64_ROOT_LEVEL-1];
1388 #define for_each_sp(pvec, sp, parents, i) \
1389 for (i = mmu_pages_next(&pvec, &parents, -1), \
1390 sp = pvec.page[i].sp; \
1391 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1392 i = mmu_pages_next(&pvec, &parents, i))
1394 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1395 struct mmu_page_path *parents,
1400 for (n = i+1; n < pvec->nr; n++) {
1401 struct kvm_mmu_page *sp = pvec->page[n].sp;
1403 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1404 parents->idx[0] = pvec->page[n].idx;
1408 parents->parent[sp->role.level-2] = sp;
1409 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1415 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1417 struct kvm_mmu_page *sp;
1418 unsigned int level = 0;
1421 unsigned int idx = parents->idx[level];
1423 sp = parents->parent[level];
1427 --sp->unsync_children;
1428 WARN_ON((int)sp->unsync_children < 0);
1429 __clear_bit(idx, sp->unsync_child_bitmap);
1431 } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1434 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1435 struct mmu_page_path *parents,
1436 struct kvm_mmu_pages *pvec)
1438 parents->parent[parent->role.level-1] = NULL;
1442 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1443 struct kvm_mmu_page *parent)
1446 struct kvm_mmu_page *sp;
1447 struct mmu_page_path parents;
1448 struct kvm_mmu_pages pages;
1449 LIST_HEAD(invalid_list);
1451 kvm_mmu_pages_init(parent, &parents, &pages);
1452 while (mmu_unsync_walk(parent, &pages)) {
1455 for_each_sp(pages, sp, parents, i)
1456 protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1459 kvm_flush_remote_tlbs(vcpu->kvm);
1461 for_each_sp(pages, sp, parents, i) {
1462 kvm_sync_page(vcpu, sp, &invalid_list);
1463 mmu_pages_clear_parents(&parents);
1465 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1466 cond_resched_lock(&vcpu->kvm->mmu_lock);
1467 kvm_mmu_pages_init(parent, &parents, &pages);
1471 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1479 union kvm_mmu_page_role role;
1481 struct kvm_mmu_page *sp;
1482 struct hlist_node *node;
1483 bool need_sync = false;
1485 role = vcpu->arch.mmu.base_role;
1487 role.direct = direct;
1490 role.access = access;
1491 if (!vcpu->arch.mmu.direct_map
1492 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1493 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1494 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1495 role.quadrant = quadrant;
1497 for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
1498 if (!need_sync && sp->unsync)
1501 if (sp->role.word != role.word)
1504 if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
1507 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1508 if (sp->unsync_children) {
1509 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1510 kvm_mmu_mark_parents_unsync(sp);
1511 } else if (sp->unsync)
1512 kvm_mmu_mark_parents_unsync(sp);
1514 trace_kvm_mmu_get_page(sp, false);
1517 ++vcpu->kvm->stat.mmu_cache_miss;
1518 sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
1523 hlist_add_head(&sp->hash_link,
1524 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
1526 if (rmap_write_protect(vcpu->kvm, gfn))
1527 kvm_flush_remote_tlbs(vcpu->kvm);
1528 if (level > PT_PAGE_TABLE_LEVEL && need_sync)
1529 kvm_sync_pages(vcpu, gfn);
1531 account_shadowed(vcpu->kvm, gfn);
1533 if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1534 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1536 nonpaging_prefetch_page(vcpu, sp);
1537 trace_kvm_mmu_get_page(sp, true);
1541 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1542 struct kvm_vcpu *vcpu, u64 addr)
1544 iterator->addr = addr;
1545 iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1546 iterator->level = vcpu->arch.mmu.shadow_root_level;
1548 if (iterator->level == PT64_ROOT_LEVEL &&
1549 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
1550 !vcpu->arch.mmu.direct_map)
1553 if (iterator->level == PT32E_ROOT_LEVEL) {
1554 iterator->shadow_addr
1555 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1556 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1558 if (!iterator->shadow_addr)
1559 iterator->level = 0;
1563 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1565 if (iterator->level < PT_PAGE_TABLE_LEVEL)
1568 if (iterator->level == PT_PAGE_TABLE_LEVEL)
1569 if (is_large_pte(*iterator->sptep))
1572 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1573 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1577 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1579 iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1583 static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
1587 spte = __pa(sp->spt)
1588 | PT_PRESENT_MASK | PT_ACCESSED_MASK
1589 | PT_WRITABLE_MASK | PT_USER_MASK;
1590 __set_spte(sptep, spte);
1593 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1595 if (is_large_pte(*sptep)) {
1596 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1597 kvm_flush_remote_tlbs(vcpu->kvm);
1601 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1602 unsigned direct_access)
1604 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
1605 struct kvm_mmu_page *child;
1608 * For the direct sp, if the guest pte's dirty bit
1609 * changed from clean to dirty, it will corrupt the
1610 * sp's access: allow writable in the read-only sp,
1611 * so we should update the spte at this point to get
1612 * a new sp with the correct access.
1614 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
1615 if (child->role.access == direct_access)
1618 mmu_page_remove_parent_pte(child, sptep);
1619 __set_spte(sptep, shadow_trap_nonpresent_pte);
1620 kvm_flush_remote_tlbs(vcpu->kvm);
1624 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1625 struct kvm_mmu_page *sp)
1633 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1636 if (is_shadow_present_pte(ent)) {
1637 if (!is_last_spte(ent, sp->role.level)) {
1638 ent &= PT64_BASE_ADDR_MASK;
1639 mmu_page_remove_parent_pte(page_header(ent),
1642 if (is_large_pte(ent))
1644 drop_spte(kvm, &pt[i],
1645 shadow_trap_nonpresent_pte);
1648 pt[i] = shadow_trap_nonpresent_pte;
1652 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1654 mmu_page_remove_parent_pte(sp, parent_pte);
1657 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1660 struct kvm_vcpu *vcpu;
1662 kvm_for_each_vcpu(i, vcpu, kvm)
1663 vcpu->arch.last_pte_updated = NULL;
1666 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1670 while (sp->multimapped || sp->parent_pte) {
1671 if (!sp->multimapped)
1672 parent_pte = sp->parent_pte;
1674 struct kvm_pte_chain *chain;
1676 chain = container_of(sp->parent_ptes.first,
1677 struct kvm_pte_chain, link);
1678 parent_pte = chain->parent_ptes[0];
1680 BUG_ON(!parent_pte);
1681 kvm_mmu_put_page(sp, parent_pte);
1682 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1686 static int mmu_zap_unsync_children(struct kvm *kvm,
1687 struct kvm_mmu_page *parent,
1688 struct list_head *invalid_list)
1691 struct mmu_page_path parents;
1692 struct kvm_mmu_pages pages;
1694 if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1697 kvm_mmu_pages_init(parent, &parents, &pages);
1698 while (mmu_unsync_walk(parent, &pages)) {
1699 struct kvm_mmu_page *sp;
1701 for_each_sp(pages, sp, parents, i) {
1702 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
1703 mmu_pages_clear_parents(&parents);
1706 kvm_mmu_pages_init(parent, &parents, &pages);
1712 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1713 struct list_head *invalid_list)
1717 trace_kvm_mmu_prepare_zap_page(sp);
1718 ++kvm->stat.mmu_shadow_zapped;
1719 ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
1720 kvm_mmu_page_unlink_children(kvm, sp);
1721 kvm_mmu_unlink_parents(kvm, sp);
1722 if (!sp->role.invalid && !sp->role.direct)
1723 unaccount_shadowed(kvm, sp->gfn);
1725 kvm_unlink_unsync_page(kvm, sp);
1726 if (!sp->root_count) {
1729 list_move(&sp->link, invalid_list);
1731 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1732 kvm_reload_remote_mmus(kvm);
1735 sp->role.invalid = 1;
1736 kvm_mmu_reset_last_pte_updated(kvm);
1740 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1741 struct list_head *invalid_list)
1743 struct kvm_mmu_page *sp;
1745 if (list_empty(invalid_list))
1748 kvm_flush_remote_tlbs(kvm);
1751 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
1752 WARN_ON(!sp->role.invalid || sp->root_count);
1753 kvm_mmu_free_page(kvm, sp);
1754 } while (!list_empty(invalid_list));
1759 * Change the number of mmu pages allocated to the vm.
1760 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
1762 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
1764 LIST_HEAD(invalid_list);
1766 * If we set the number of mmu pages to be smaller than the
1767 * number of active pages, we must free some mmu pages before we
1771 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
1772 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
1773 !list_empty(&kvm->arch.active_mmu_pages)) {
1774 struct kvm_mmu_page *page;
1776 page = container_of(kvm->arch.active_mmu_pages.prev,
1777 struct kvm_mmu_page, link);
1778 kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
1779 kvm_mmu_commit_zap_page(kvm, &invalid_list);
1781 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
1784 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
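/*
 * Editor's note: this resize path is driven by userspace, e.g. via the
 * KVM_SET_NR_MMU_PAGES ioctl, whose handler in x86.c calls
 * kvm_mmu_change_mmu_pages() with the requested page count.
 */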
1787 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1789 struct kvm_mmu_page *sp;
1790 struct hlist_node *node;
1791 LIST_HEAD(invalid_list);
1794 pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
1797 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1798 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
1801 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1803 kvm_mmu_commit_zap_page(kvm, &invalid_list);
1807 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1809 struct kvm_mmu_page *sp;
1810 struct hlist_node *node;
1811 LIST_HEAD(invalid_list);
1813 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
1814 pgprintk("%s: zap %llx %x\n",
1815 __func__, gfn, sp->role.word);
1816 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
1818 kvm_mmu_commit_zap_page(kvm, &invalid_list);
1821 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1823 int slot = memslot_id(kvm, gfn);
1824 struct kvm_mmu_page *sp = page_header(__pa(pte));
1826 __set_bit(slot, sp->slot_bitmap);
1829 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1834 if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1837 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1838 if (pt[i] == shadow_notrap_nonpresent_pte)
1839 __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1844 * The function is based on mtrr_type_lookup() in
1845 * arch/x86/kernel/cpu/mtrr/generic.c
1847 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1852 u8 prev_match, curr_match;
1853 int num_var_ranges = KVM_NR_VAR_MTRR;
1855 if (!mtrr_state->enabled)
1858 /* Make end inclusive, instead of exclusive */
1861 /* Look in fixed ranges. Just return the type as per start */
1862 if (mtrr_state->have_fixed && (start < 0x100000)) {
1865 if (start < 0x80000) {
1867 idx += (start >> 16);
1868 return mtrr_state->fixed_ranges[idx];
1869 } else if (start < 0xC0000) {
1871 idx += ((start - 0x80000) >> 14);
1872 return mtrr_state->fixed_ranges[idx];
1873 } else if (start < 0x1000000) {
1875 idx += ((start - 0xC0000) >> 12);
1876 return mtrr_state->fixed_ranges[idx];
1881 * Look in variable ranges.
1882 * Look for multiple ranges matching this address and pick the type
1883 * as per MTRR precedence.
1885 if (!(mtrr_state->enabled & 2))
1886 return mtrr_state->def_type;
1889 for (i = 0; i < num_var_ranges; ++i) {
1890 unsigned short start_state, end_state;
1892 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1895 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1896 (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1897 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1898 (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1900 start_state = ((start & mask) == (base & mask));
1901 end_state = ((end & mask) == (base & mask));
1902 if (start_state != end_state)
1905 if ((start & mask) != (base & mask))
1908 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1909 if (prev_match == 0xFF) {
1910 prev_match = curr_match;
1914 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1915 curr_match == MTRR_TYPE_UNCACHABLE)
1916 return MTRR_TYPE_UNCACHABLE;
1918 if ((prev_match == MTRR_TYPE_WRBACK &&
1919 curr_match == MTRR_TYPE_WRTHROUGH) ||
1920 (prev_match == MTRR_TYPE_WRTHROUGH &&
1921 curr_match == MTRR_TYPE_WRBACK)) {
1922 prev_match = MTRR_TYPE_WRTHROUGH;
1923 curr_match = MTRR_TYPE_WRTHROUGH;
1926 if (prev_match != curr_match)
1927 return MTRR_TYPE_UNCACHABLE;
1930 if (prev_match != 0xFF)
1933 return mtrr_state->def_type;
1936 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1940 mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1941 (gfn << PAGE_SHIFT) + PAGE_SIZE);
1942 if (mtrr == 0xfe || mtrr == 0xff)
1943 mtrr = MTRR_TYPE_WRBACK;
1946 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1948 static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1950 trace_kvm_mmu_unsync_page(sp);
1951 ++vcpu->kvm->stat.mmu_unsync;
1954 kvm_mmu_mark_parents_unsync(sp);
1955 mmu_convert_notrap(sp);
1958 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
1960 struct kvm_mmu_page *s;
1961 struct hlist_node *node;
1963 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1966 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1967 __kvm_unsync_page(vcpu, s);
1971 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1974 struct kvm_mmu_page *s;
1975 struct hlist_node *node;
1976 bool need_unsync = false;
1978 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
1982 if (s->role.level != PT_PAGE_TABLE_LEVEL)
1985 if (!need_unsync && !s->unsync) {
1992 kvm_unsync_pages(vcpu, gfn);
1996 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1997 unsigned pte_access, int user_fault,
1998 int write_fault, int dirty, int level,
1999 gfn_t gfn, pfn_t pfn, bool speculative,
2000 bool can_unsync, bool host_writable)
2002 u64 spte, entry = *sptep;
2006 * We don't set the accessed bit, since we sometimes want to see
2007 * whether the guest actually used the pte (in order to detect
2010 spte = PT_PRESENT_MASK;
2012 spte |= shadow_accessed_mask;
2014 pte_access &= ~ACC_WRITE_MASK;
2015 if (pte_access & ACC_EXEC_MASK)
2016 spte |= shadow_x_mask;
2018 spte |= shadow_nx_mask;
2019 if (pte_access & ACC_USER_MASK)
2020 spte |= shadow_user_mask;
2021 if (level > PT_PAGE_TABLE_LEVEL)
2022 spte |= PT_PAGE_SIZE_MASK;
2024 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2025 kvm_is_mmio_pfn(pfn));
2028 spte |= SPTE_HOST_WRITEABLE;
2030 pte_access &= ~ACC_WRITE_MASK;
2032 spte |= (u64)pfn << PAGE_SHIFT;
2034 if ((pte_access & ACC_WRITE_MASK)
2035 || (!vcpu->arch.mmu.direct_map && write_fault
2036 && !is_write_protection(vcpu) && !user_fault)) {
2038 if (level > PT_PAGE_TABLE_LEVEL &&
2039 has_wrprotected_page(vcpu->kvm, gfn, level)) {
2041 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2045 spte |= PT_WRITABLE_MASK;
2047 if (!vcpu->arch.mmu.direct_map
2048 && !(pte_access & ACC_WRITE_MASK))
2049 spte &= ~PT_USER_MASK;
2052 * Optimization: for pte sync, if spte was writable the hash
2053 * lookup is unnecessary (and expensive). Write protection
2054 * is the responsibility of mmu_get_page / kvm_sync_page.
2055 * Same reasoning can be applied to dirty page accounting.
2057 if (!can_unsync && is_writable_pte(*sptep))
2060 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2061 pgprintk("%s: found shadow page for %llx, marking ro\n",
2064 pte_access &= ~ACC_WRITE_MASK;
2065 if (is_writable_pte(spte))
2066 spte &= ~PT_WRITABLE_MASK;
2070 if (pte_access & ACC_WRITE_MASK)
2071 mark_page_dirty(vcpu->kvm, gfn);
2074 update_spte(sptep, spte);
2076 * If we overwrite a writable spte with a read-only one we
2077 * should flush remote TLBs. Otherwise rmap_write_protect
2078 * will find a read-only spte, even though the writable spte
2079 * might be cached on a CPU's TLB.
2081 if (is_writable_pte(entry) && !is_writable_pte(*sptep))
2082 kvm_flush_remote_tlbs(vcpu->kvm);
2087 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2088 unsigned pt_access, unsigned pte_access,
2089 int user_fault, int write_fault, int dirty,
2090 int *ptwrite, int level, gfn_t gfn,
2091 pfn_t pfn, bool speculative,
2094 int was_rmapped = 0;
2097 pgprintk("%s: spte %llx access %x write_fault %d"
2098 " user_fault %d gfn %llx\n",
2099 __func__, *sptep, pt_access,
2100 write_fault, user_fault, gfn);
2102 if (is_rmap_spte(*sptep)) {
2104 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2105 * the parent of the now unreachable PTE.
2107 if (level > PT_PAGE_TABLE_LEVEL &&
2108 !is_large_pte(*sptep)) {
2109 struct kvm_mmu_page *child;
2112 child = page_header(pte & PT64_BASE_ADDR_MASK);
2113 mmu_page_remove_parent_pte(child, sptep);
2114 __set_spte(sptep, shadow_trap_nonpresent_pte);
2115 kvm_flush_remote_tlbs(vcpu->kvm);
2116 } else if (pfn != spte_to_pfn(*sptep)) {
2117 pgprintk("hfn old %llx new %llx\n",
2118 spte_to_pfn(*sptep), pfn);
2119 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
2120 kvm_flush_remote_tlbs(vcpu->kvm);
2125 if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
2126 dirty, level, gfn, pfn, speculative, true,
2130 kvm_mmu_flush_tlb(vcpu);
2133 pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2134 pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2135 is_large_pte(*sptep)? "2MB" : "4kB",
2136 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
2138 if (!was_rmapped && is_large_pte(*sptep))
2139 ++vcpu->kvm->stat.lpages;
2141 page_header_update_slot(vcpu->kvm, sptep, gfn);
2143 rmap_count = rmap_add(vcpu, sptep, gfn);
2144 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2145 rmap_recycle(vcpu, sptep, gfn);
2147 kvm_release_pfn_clean(pfn);
2149 vcpu->arch.last_pte_updated = sptep;
2150 vcpu->arch.last_pte_gfn = gfn;
2154 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
2158 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2161 struct kvm_memory_slot *slot;
2164 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2167 return page_to_pfn(bad_page);
2170 hva = gfn_to_hva_memslot(slot, gfn);
2172 return hva_to_pfn_atomic(vcpu->kvm, hva);
2175 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2176 struct kvm_mmu_page *sp,
2177 u64 *start, u64 *end)
2179 struct page *pages[PTE_PREFETCH_NUM];
2180 unsigned access = sp->role.access;
2184 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2185 if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
2188 ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
2192 for (i = 0; i < ret; i++, gfn++, start++)
2193 mmu_set_spte(vcpu, start, ACC_ALL,
2194 access, 0, 0, 1, NULL,
2195 sp->role.level, gfn,
2196 page_to_pfn(pages[i]), true, true);
2201 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2202 struct kvm_mmu_page *sp, u64 *sptep)
2204 u64 *spte, *start = NULL;
2207 WARN_ON(!sp->role.direct);
2209 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2212 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2213 if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
2216 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2224 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2226 struct kvm_mmu_page *sp;
2229 * Since there is no accessed bit on EPT, there is no way to
2230 * distinguish between actually accessed translations
2231 * and prefetched ones, so disable pte prefetch if EPT is
2234 if (!shadow_accessed_mask)
2237 sp = page_header(__pa(sptep));
2238 if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2241 __direct_pte_prefetch(vcpu, sp, sptep);
2244 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2245 int map_writable, int level, gfn_t gfn, pfn_t pfn,
2248 struct kvm_shadow_walk_iterator iterator;
2249 struct kvm_mmu_page *sp;
2253 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
2254 if (iterator.level == level) {
2255 unsigned pte_access = ACC_ALL;
2257 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
2258 0, write, 1, &pt_write,
2259 level, gfn, pfn, prefault, map_writable);
2260 direct_pte_prefetch(vcpu, iterator.sptep);
2261 ++vcpu->stat.pf_fixed;
2265 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
2266 u64 base_addr = iterator.addr;
2268 base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
2269 pseudo_gfn = base_addr >> PAGE_SHIFT;
2270 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2272 1, ACC_ALL, iterator.sptep);
2274 pgprintk("nonpaging_map: ENOMEM\n");
2275 kvm_release_pfn_clean(pfn);
2279 __set_spte(iterator.sptep,
2281 | PT_PRESENT_MASK | PT_WRITABLE_MASK
2282 | shadow_user_mask | shadow_x_mask
2283 | shadow_accessed_mask);
2289 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2293 info.si_signo = SIGBUS;
2295 info.si_code = BUS_MCEERR_AR;
2296 info.si_addr = (void __user *)address;
2297 info.si_addr_lsb = PAGE_SHIFT;
2299 send_sig_info(SIGBUS, &info, tsk);
2302 static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
2304 kvm_release_pfn_clean(pfn);
2305 if (is_hwpoison_pfn(pfn)) {
2306 kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
2308 } else if (is_fault_pfn(pfn))
2314 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2315 gfn_t *gfnp, pfn_t *pfnp, int *levelp)
2319 int level = *levelp;
2322 * Check if it's a transparent hugepage. If this were a
2323 * hugetlbfs page, level wouldn't be set to
2324 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
2327 if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
2328 level == PT_PAGE_TABLE_LEVEL &&
2329 PageTransCompound(pfn_to_page(pfn)) &&
2330 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
2333 * mmu_notifier_retry was successful and we hold the
2334 * mmu_lock here, so the pmd can't be split out
2335 * from under us, and in turn
2336 * __split_huge_page_refcount() can't run from under
2337 * us and we can safely transfer the refcount from
2338 * PG_tail to PG_head as we switch the pfn to tail to
2341 *levelp = level = PT_DIRECTORY_LEVEL;
2342 mask = KVM_PAGES_PER_HPAGE(level) - 1;
2343 VM_BUG_ON((gfn & mask) != (pfn & mask));
2347 kvm_release_pfn_clean(pfn);
2349 if (!get_page_unless_zero(pfn_to_page(pfn)))
2356 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2357 gva_t gva, pfn_t *pfn, bool write, bool *writable);
2359 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
2366 unsigned long mmu_seq;
2369 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
2370 if (likely(!force_pt_level)) {
2371 level = mapping_level(vcpu, gfn);
2373 * This path builds a PAE pagetable - so we can map
2374 * 2mb pages at maximum. Therefore check if the level
2375 * is larger than that.
2377 if (level > PT_DIRECTORY_LEVEL)
2378 level = PT_DIRECTORY_LEVEL;
2380 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2382 level = PT_PAGE_TABLE_LEVEL;
2384 mmu_seq = vcpu->kvm->mmu_notifier_seq;
2387 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
2391 if (is_error_pfn(pfn))
2392 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2394 spin_lock(&vcpu->kvm->mmu_lock);
2395 if (mmu_notifier_retry(vcpu, mmu_seq))
2397 kvm_mmu_free_some_pages(vcpu);
2398 if (likely(!force_pt_level))
2399 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
2400 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
2402 spin_unlock(&vcpu->kvm->mmu_lock);
2408 spin_unlock(&vcpu->kvm->mmu_lock);
2409 kvm_release_pfn_clean(pfn);
2414 static void mmu_free_roots(struct kvm_vcpu *vcpu)
2417 struct kvm_mmu_page *sp;
2418 LIST_HEAD(invalid_list);
2420 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2422 spin_lock(&vcpu->kvm->mmu_lock);
2423 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
2424 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
2425 vcpu->arch.mmu.direct_map)) {
2426 hpa_t root = vcpu->arch.mmu.root_hpa;
2428 sp = page_header(root);
2430 if (!sp->root_count && sp->role.invalid) {
2431 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
2432 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2434 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2435 spin_unlock(&vcpu->kvm->mmu_lock);
2438 for (i = 0; i < 4; ++i) {
2439 hpa_t root = vcpu->arch.mmu.pae_root[i];
2442 root &= PT64_BASE_ADDR_MASK;
2443 sp = page_header(root);
2445 if (!sp->root_count && sp->role.invalid)
2446 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2449 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2451 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2452 spin_unlock(&vcpu->kvm->mmu_lock);
2453 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2456 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2460 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
2461 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2468 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
2470 struct kvm_mmu_page *sp;
2473 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2474 spin_lock(&vcpu->kvm->mmu_lock);
2475 kvm_mmu_free_some_pages(vcpu);
2476 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
2479 spin_unlock(&vcpu->kvm->mmu_lock);
2480 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
2481 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
2482 for (i = 0; i < 4; ++i) {
2483 hpa_t root = vcpu->arch.mmu.pae_root[i];
2485 ASSERT(!VALID_PAGE(root));
2486 spin_lock(&vcpu->kvm->mmu_lock);
2487 kvm_mmu_free_some_pages(vcpu);
2488 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
2490 PT32_ROOT_LEVEL, 1, ACC_ALL,
2492 root = __pa(sp->spt);
2494 spin_unlock(&vcpu->kvm->mmu_lock);
2495 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2497 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2504 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
2506 struct kvm_mmu_page *sp;
2511 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
2513 if (mmu_check_root(vcpu, root_gfn))
2517 * Do we shadow a long mode page table? If so we need to
2518 * write-protect the guest's page table root.
2520 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2521 hpa_t root = vcpu->arch.mmu.root_hpa;
2523 ASSERT(!VALID_PAGE(root));
2525 spin_lock(&vcpu->kvm->mmu_lock);
2526 kvm_mmu_free_some_pages(vcpu);
2527 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
2529 root = __pa(sp->spt);
2531 spin_unlock(&vcpu->kvm->mmu_lock);
2532 vcpu->arch.mmu.root_hpa = root;
2537 * We shadow a 32-bit page table. This may be a legacy 2-level
2538 * or a PAE 3-level page table. In either case we need to be aware that
2539 * the shadow page table may be a PAE or a long mode page table.
2541 pm_mask = PT_PRESENT_MASK;
2542 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
2543 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
2545 for (i = 0; i < 4; ++i) {
2546 hpa_t root = vcpu->arch.mmu.pae_root[i];
2548 ASSERT(!VALID_PAGE(root));
2549 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2550 pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i);
2551 if (!is_present_gpte(pdptr)) {
2552 vcpu->arch.mmu.pae_root[i] = 0;
2555 root_gfn = pdptr >> PAGE_SHIFT;
2556 if (mmu_check_root(vcpu, root_gfn))
2559 spin_lock(&vcpu->kvm->mmu_lock);
2560 kvm_mmu_free_some_pages(vcpu);
2561 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2564 root = __pa(sp->spt);
2566 spin_unlock(&vcpu->kvm->mmu_lock);
2568 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
2570 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2573 * If we shadow a 32-bit page table with a long mode page
2574 * table, we enter this path.
2576 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2577 if (vcpu->arch.mmu.lm_root == NULL) {
2579 * The additional page necessary for this is only
2580 * allocated on demand.
2585 lm_root = (void*)get_zeroed_page(GFP_KERNEL);
2586 if (lm_root == NULL)
2589 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
2591 vcpu->arch.mmu.lm_root = lm_root;
2594 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
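/*
 * Illustrative sketch, not part of the original driver: how a shadow
 * root entry is composed above.  When the shadow walk is long mode but
 * the guest runs 32-bit or PAE paging, the pae_root[] entries act as
 * ordinary entries of the shadow table, so they also need the
 * accessed/write/user bits.  The helper name is hypothetical.
 */
static inline u64 example_shadow_root_entry(u64 spt_pa, bool long_mode_shadow)
{
	u64 pm_mask = PT_PRESENT_MASK;

	if (long_mode_shadow)
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

	return spt_pa | pm_mask;
}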
2600 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2602 if (vcpu->arch.mmu.direct_map)
2603 return mmu_alloc_direct_roots(vcpu);
2605 return mmu_alloc_shadow_roots(vcpu);
2608 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2611 struct kvm_mmu_page *sp;
2613 if (vcpu->arch.mmu.direct_map)
2616 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2619 trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
2620 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
2621 hpa_t root = vcpu->arch.mmu.root_hpa;
2622 sp = page_header(root);
2623 mmu_sync_children(vcpu, sp);
2624 trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2627 for (i = 0; i < 4; ++i) {
2628 hpa_t root = vcpu->arch.mmu.pae_root[i];
2630 if (root && VALID_PAGE(root)) {
2631 root &= PT64_BASE_ADDR_MASK;
2632 sp = page_header(root);
2633 mmu_sync_children(vcpu, sp);
2636 trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
2639 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2641 spin_lock(&vcpu->kvm->mmu_lock);
2642 mmu_sync_roots(vcpu);
2643 spin_unlock(&vcpu->kvm->mmu_lock);
2646 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2647 u32 access, struct x86_exception *exception)
2650 exception->error_code = 0;
2654 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
2656 struct x86_exception *exception)
2659 exception->error_code = 0;
2660 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
2663 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2664 u32 error_code, bool prefault)
2669 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2670 r = mmu_topup_memory_caches(vcpu);
2675 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2677 gfn = gva >> PAGE_SHIFT;
2679 return nonpaging_map(vcpu, gva & PAGE_MASK,
2680 error_code & PFERR_WRITE_MASK, gfn, prefault);
2683 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
2685 struct kvm_arch_async_pf arch;
2687 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
2689 arch.direct_map = vcpu->arch.mmu.direct_map;
2690 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
2692 return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
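/*
 * Illustrative sketch, not part of the original driver: the async page
 * fault token built above packs a per-vcpu sequence number in the high
 * bits and the vcpu id in the low 12 bits, so a completion can later be
 * matched to the vcpu that queued it.  The helper names are
 * hypothetical.
 */
static inline u32 example_apf_token_pack(u32 seq, u32 vcpu_id)
{
	return (seq << 12) | (vcpu_id & 0xfff);
}

static inline u32 example_apf_token_vcpu(u32 token)
{
	return token & 0xfff;
}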
2695 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
2697 if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
2698 kvm_event_needs_reinjection(vcpu)))
2701 return kvm_x86_ops->interrupt_allowed(vcpu);
2704 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2705 gva_t gva, pfn_t *pfn, bool write, bool *writable)
2709 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
2712 return false; /* *pfn has correct page already */
2714 put_page(pfn_to_page(*pfn));
2716 if (!prefault && can_do_async_pf(vcpu)) {
2717 trace_kvm_try_async_get_page(gva, gfn);
2718 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
2719 trace_kvm_async_pf_doublefault(gva, gfn);
2720 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
2722 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
2726 *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
2731 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
2738 gfn_t gfn = gpa >> PAGE_SHIFT;
2739 unsigned long mmu_seq;
2740 int write = error_code & PFERR_WRITE_MASK;
2744 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2746 r = mmu_topup_memory_caches(vcpu);
2750 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
2751 if (likely(!force_pt_level)) {
2752 level = mapping_level(vcpu, gfn);
2753 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2755 level = PT_PAGE_TABLE_LEVEL;
2757 mmu_seq = vcpu->kvm->mmu_notifier_seq;
2760 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
2764 if (is_error_pfn(pfn))
2765 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
2766 spin_lock(&vcpu->kvm->mmu_lock);
2767 if (mmu_notifier_retry(vcpu, mmu_seq))
2769 kvm_mmu_free_some_pages(vcpu);
2770 if (likely(!force_pt_level))
2771 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
2772 r = __direct_map(vcpu, gpa, write, map_writable,
2773 level, gfn, pfn, prefault);
2774 spin_unlock(&vcpu->kvm->mmu_lock);
2779 spin_unlock(&vcpu->kvm->mmu_lock);
2780 kvm_release_pfn_clean(pfn);
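/*
 * Illustrative sketch, not part of the original driver: the
 * mmu_notifier_seq pattern used by both fault paths above.  The
 * sequence count is sampled before the pfn is resolved outside the mmu
 * lock; if an invalidation ran (or is still running) in between, the
 * fault must be retried rather than installing a stale mapping.  This
 * helper is a hypothetical stand-in for what mmu_notifier_retry()
 * checks.
 */
static inline bool example_fault_raced_with_invalidate(unsigned long seq_before,
						       unsigned long seq_now,
						       bool invalidate_in_progress)
{
	return invalidate_in_progress || seq_now != seq_before;
}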
2784 static void nonpaging_free(struct kvm_vcpu *vcpu)
2786 mmu_free_roots(vcpu);
2789 static int nonpaging_init_context(struct kvm_vcpu *vcpu,
2790 struct kvm_mmu *context)
2792 context->new_cr3 = nonpaging_new_cr3;
2793 context->page_fault = nonpaging_page_fault;
2794 context->gva_to_gpa = nonpaging_gva_to_gpa;
2795 context->free = nonpaging_free;
2796 context->prefetch_page = nonpaging_prefetch_page;
2797 context->sync_page = nonpaging_sync_page;
2798 context->invlpg = nonpaging_invlpg;
2799 context->root_level = 0;
2800 context->shadow_root_level = PT32E_ROOT_LEVEL;
2801 context->root_hpa = INVALID_PAGE;
2802 context->direct_map = true;
2803 context->nx = false;
2807 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2809 ++vcpu->stat.tlb_flush;
2810 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2813 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2815 pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
2816 mmu_free_roots(vcpu);
2819 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
2821 return kvm_read_cr3(vcpu);
2824 static void inject_page_fault(struct kvm_vcpu *vcpu,
2825 struct x86_exception *fault)
2827 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
2830 static void paging_free(struct kvm_vcpu *vcpu)
2832 nonpaging_free(vcpu);
2835 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
2839 bit7 = (gpte >> 7) & 1;
2840 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
2844 #include "paging_tmpl.h"
2848 #include "paging_tmpl.h"
2851 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
2852 struct kvm_mmu *context,
2855 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2856 u64 exb_bit_rsvd = 0;
2859 exb_bit_rsvd = rsvd_bits(63, 63);
2861 case PT32_ROOT_LEVEL:
2862 /* no rsvd bits for 2-level 4K page table entries */
2863 context->rsvd_bits_mask[0][1] = 0;
2864 context->rsvd_bits_mask[0][0] = 0;
2865 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2867 if (!is_pse(vcpu)) {
2868 context->rsvd_bits_mask[1][1] = 0;
2872 if (is_cpuid_PSE36())
2873 /* 36-bit PSE 4MB page */
2874 context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2876 /* 32-bit PSE 4MB page */
2877 context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2879 case PT32E_ROOT_LEVEL:
2880 context->rsvd_bits_mask[0][2] =
2881 rsvd_bits(maxphyaddr, 63) |
2882 rsvd_bits(7, 8) | rsvd_bits(1, 2); /* PDPTE */
2883 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2884 rsvd_bits(maxphyaddr, 62); /* PDE */
2885 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2886 rsvd_bits(maxphyaddr, 62); /* PTE */
2887 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2888 rsvd_bits(maxphyaddr, 62) |
2889 rsvd_bits(13, 20); /* large page */
2890 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2892 case PT64_ROOT_LEVEL:
2893 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2894 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2895 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2896 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2897 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2898 rsvd_bits(maxphyaddr, 51);
2899 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2900 rsvd_bits(maxphyaddr, 51);
2901 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2902 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2903 rsvd_bits(maxphyaddr, 51) |
2905 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2906 rsvd_bits(maxphyaddr, 51) |
2907 rsvd_bits(13, 20); /* large page */
2908 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
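/*
 * Illustrative example, not part of the original driver: rsvd_bits(s, e)
 * builds a mask with bits s..e set, so on a CPU with maxphyaddr == 40
 * the PT64 PTE mask assembled above includes rsvd_bits(40, 51), i.e.
 *
 *	((1ULL << (51 - 40 + 1)) - 1) << 40 == 0x000fff0000000000
 *
 * and any guest pte with one of physical-address bits 40..51 set is
 * rejected as having reserved bits.
 */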
2913 static int paging64_init_context_common(struct kvm_vcpu *vcpu,
2914 struct kvm_mmu *context,
2917 context->nx = is_nx(vcpu);
2919 reset_rsvds_bits_mask(vcpu, context, level);
2921 ASSERT(is_pae(vcpu));
2922 context->new_cr3 = paging_new_cr3;
2923 context->page_fault = paging64_page_fault;
2924 context->gva_to_gpa = paging64_gva_to_gpa;
2925 context->prefetch_page = paging64_prefetch_page;
2926 context->sync_page = paging64_sync_page;
2927 context->invlpg = paging64_invlpg;
2928 context->free = paging_free;
2929 context->root_level = level;
2930 context->shadow_root_level = level;
2931 context->root_hpa = INVALID_PAGE;
2932 context->direct_map = false;
2936 static int paging64_init_context(struct kvm_vcpu *vcpu,
2937 struct kvm_mmu *context)
2939 return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
2942 static int paging32_init_context(struct kvm_vcpu *vcpu,
2943 struct kvm_mmu *context)
2945 context->nx = false;
2947 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
2949 context->new_cr3 = paging_new_cr3;
2950 context->page_fault = paging32_page_fault;
2951 context->gva_to_gpa = paging32_gva_to_gpa;
2952 context->free = paging_free;
2953 context->prefetch_page = paging32_prefetch_page;
2954 context->sync_page = paging32_sync_page;
2955 context->invlpg = paging32_invlpg;
2956 context->root_level = PT32_ROOT_LEVEL;
2957 context->shadow_root_level = PT32E_ROOT_LEVEL;
2958 context->root_hpa = INVALID_PAGE;
2959 context->direct_map = false;
2963 static int paging32E_init_context(struct kvm_vcpu *vcpu,
2964 struct kvm_mmu *context)
2966 return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
2969 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2971 struct kvm_mmu *context = vcpu->arch.walk_mmu;
2973 context->base_role.word = 0;
2974 context->new_cr3 = nonpaging_new_cr3;
2975 context->page_fault = tdp_page_fault;
2976 context->free = nonpaging_free;
2977 context->prefetch_page = nonpaging_prefetch_page;
2978 context->sync_page = nonpaging_sync_page;
2979 context->invlpg = nonpaging_invlpg;
2980 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2981 context->root_hpa = INVALID_PAGE;
2982 context->direct_map = true;
2983 context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
2984 context->get_cr3 = get_cr3;
2985 context->inject_page_fault = kvm_inject_page_fault;
2986 context->nx = is_nx(vcpu);
2988 if (!is_paging(vcpu)) {
2989 context->nx = false;
2990 context->gva_to_gpa = nonpaging_gva_to_gpa;
2991 context->root_level = 0;
2992 } else if (is_long_mode(vcpu)) {
2993 context->nx = is_nx(vcpu);
2994 reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
2995 context->gva_to_gpa = paging64_gva_to_gpa;
2996 context->root_level = PT64_ROOT_LEVEL;
2997 } else if (is_pae(vcpu)) {
2998 context->nx = is_nx(vcpu);
2999 reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
3000 context->gva_to_gpa = paging64_gva_to_gpa;
3001 context->root_level = PT32E_ROOT_LEVEL;
3003 context->nx = false;
3004 reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
3005 context->gva_to_gpa = paging32_gva_to_gpa;
3006 context->root_level = PT32_ROOT_LEVEL;
3012 int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
3016 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3018 if (!is_paging(vcpu))
3019 r = nonpaging_init_context(vcpu, context);
3020 else if (is_long_mode(vcpu))
3021 r = paging64_init_context(vcpu, context);
3022 else if (is_pae(vcpu))
3023 r = paging32E_init_context(vcpu, context);
3025 r = paging32_init_context(vcpu, context);
3027 vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
3028 vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
3032 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
3034 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
3036 int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
3038 vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
3039 vcpu->arch.walk_mmu->get_cr3 = get_cr3;
3040 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3045 static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
3047 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
3049 g_context->get_cr3 = get_cr3;
3050 g_context->inject_page_fault = kvm_inject_page_fault;
3053 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
3054 * translation of l2_gpa to l1_gpa addresses is done using the
3055 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
3056 * functions between mmu and nested_mmu are swapped.
3058 if (!is_paging(vcpu)) {
3059 g_context->nx = false;
3060 g_context->root_level = 0;
3061 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
3062 } else if (is_long_mode(vcpu)) {
3063 g_context->nx = is_nx(vcpu);
3064 reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL);
3065 g_context->root_level = PT64_ROOT_LEVEL;
3066 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
3067 } else if (is_pae(vcpu)) {
3068 g_context->nx = is_nx(vcpu);
3069 reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL);
3070 g_context->root_level = PT32E_ROOT_LEVEL;
3071 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
3073 g_context->nx = false;
3074 reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL);
3075 g_context->root_level = PT32_ROOT_LEVEL;
3076 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
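/*
 * Illustrative note, not part of the original driver: with nesting
 * active the two contexts split the work as the comment above says:
 *
 *	vcpu->arch.mmu.gva_to_gpa(l2_gva)        -> l1_gpa  (full walk)
 *	vcpu->arch.nested_mmu.gva_to_gpa(l2_gpa) -> l1_gpa  (extra stage)
 *
 * i.e. the nested_mmu's "gva" input is really an L2 guest-physical
 * address, which is why the gva_to_gpa callbacks of mmu and nested_mmu
 * end up swapped relative to the non-nested case.
 */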
3082 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
3084 vcpu->arch.update_pte.pfn = bad_pfn;
3086 if (mmu_is_nested(vcpu))
3087 return init_kvm_nested_mmu(vcpu);
3088 else if (tdp_enabled)
3089 return init_kvm_tdp_mmu(vcpu);
3091 return init_kvm_softmmu(vcpu);
3094 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
3097 if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
3098 /* mmu.free() should set root_hpa = INVALID_PAGE */
3099 vcpu->arch.mmu.free(vcpu);
3102 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
3104 destroy_kvm_mmu(vcpu);
3105 return init_kvm_mmu(vcpu);
3107 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
3109 int kvm_mmu_load(struct kvm_vcpu *vcpu)
3113 r = mmu_topup_memory_caches(vcpu);
3116 r = mmu_alloc_roots(vcpu);
3117 spin_lock(&vcpu->kvm->mmu_lock);
3118 mmu_sync_roots(vcpu);
3119 spin_unlock(&vcpu->kvm->mmu_lock);
3122 /* set_cr3() should ensure TLB has been flushed */
3123 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
3127 EXPORT_SYMBOL_GPL(kvm_mmu_load);
3129 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
3131 mmu_free_roots(vcpu);
3133 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
3135 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
3136 struct kvm_mmu_page *sp,
3140 struct kvm_mmu_page *child;
3143 if (is_shadow_present_pte(pte)) {
3144 if (is_last_spte(pte, sp->role.level))
3145 drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
3147 child = page_header(pte & PT64_BASE_ADDR_MASK);
3148 mmu_page_remove_parent_pte(child, spte);
3151 __set_spte(spte, shadow_trap_nonpresent_pte);
3152 if (is_large_pte(pte))
3153 --vcpu->kvm->stat.lpages;
3156 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
3157 struct kvm_mmu_page *sp,
3161 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
3162 ++vcpu->kvm->stat.mmu_pde_zapped;
3166 ++vcpu->kvm->stat.mmu_pte_updated;
3167 if (!sp->role.cr4_pae)
3168 paging32_update_pte(vcpu, sp, spte, new);
3170 paging64_update_pte(vcpu, sp, spte, new);
3173 static bool need_remote_flush(u64 old, u64 new)
3175 if (!is_shadow_present_pte(old))
3177 if (!is_shadow_present_pte(new))
3179 if ((old ^ new) & PT64_BASE_ADDR_MASK)
3181 old ^= PT64_NX_MASK;
3182 new ^= PT64_NX_MASK;
3183 return (old & ~new & PT64_PERM_MASK) != 0;
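/*
 * Illustrative example, not part of the original driver: once the NX
 * bit has been inverted above, every bit in PT64_PERM_MASK means "more
 * permissive when set", so (old & ~new) is non-zero exactly when the
 * update removed a permission that other vcpus may still have cached
 * in their TLBs.  Clearing PT_WRITABLE_MASK (write -> read-only) thus
 * forces a remote flush, while granting it does not.
 */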
3186 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
3187 bool remote_flush, bool local_flush)
3193 kvm_flush_remote_tlbs(vcpu->kvm);
3194 else if (local_flush)
3195 kvm_mmu_flush_tlb(vcpu);
3198 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
3200 u64 *spte = vcpu->arch.last_pte_updated;
3202 return !!(spte && (*spte & shadow_accessed_mask));
3205 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
3211 if (!is_present_gpte(gpte))
3213 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
3215 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
3217 pfn = gfn_to_pfn(vcpu->kvm, gfn);
3219 if (is_error_pfn(pfn)) {
3220 kvm_release_pfn_clean(pfn);
3223 vcpu->arch.update_pte.pfn = pfn;
3226 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
3228 u64 *spte = vcpu->arch.last_pte_updated;
3231 && vcpu->arch.last_pte_gfn == gfn
3232 && shadow_accessed_mask
3233 && !(*spte & shadow_accessed_mask)
3234 && is_shadow_present_pte(*spte))
3235 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
3238 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
3239 const u8 *new, int bytes,
3240 bool guest_initiated)
3242 gfn_t gfn = gpa >> PAGE_SHIFT;
3243 union kvm_mmu_page_role mask = { .word = 0 };
3244 struct kvm_mmu_page *sp;
3245 struct hlist_node *node;
3246 LIST_HEAD(invalid_list);
3249 unsigned offset = offset_in_page(gpa);
3251 unsigned page_offset;
3252 unsigned misaligned;
3259 bool remote_flush, local_flush, zap_page;
3261 zap_page = remote_flush = local_flush = false;
3263 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
3265 invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
3268 * Assume that the pte write is on a page table of the same type
3269 * as the current vcpu paging mode, since we update the sptes only
3270 * when they have the same mode.
3272 if ((is_pae(vcpu) && bytes == 4) || !new) {
3273 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
3278 r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
3281 new = (const u8 *)&gentry;
3286 gentry = *(const u32 *)new;
3289 gentry = *(const u64 *)new;
3296 mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
3297 spin_lock(&vcpu->kvm->mmu_lock);
3298 if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
3300 kvm_mmu_free_some_pages(vcpu);
3301 ++vcpu->kvm->stat.mmu_pte_write;
3302 trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
3303 if (guest_initiated) {
3304 kvm_mmu_access_page(vcpu, gfn);
3305 if (gfn == vcpu->arch.last_pt_write_gfn
3306 && !last_updated_pte_accessed(vcpu)) {
3307 ++vcpu->arch.last_pt_write_count;
3308 if (vcpu->arch.last_pt_write_count >= 3)
3311 vcpu->arch.last_pt_write_gfn = gfn;
3312 vcpu->arch.last_pt_write_count = 1;
3313 vcpu->arch.last_pte_updated = NULL;
3317 mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
3318 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
3319 pte_size = sp->role.cr4_pae ? 8 : 4;
3320 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
3321 misaligned |= bytes < 4;
3322 if (misaligned || flooded) {
3324 * Misaligned accesses are too much trouble to fix
3325 * up; also, they usually indicate a page is not used as a page table.
3328 * If we're seeing too many writes to a page,
3329 * it may no longer be a page table, or we may be
3330 * forking, in which case it is better to unmap the page.
3333 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
3334 gpa, bytes, sp->role.word);
3335 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
3337 ++vcpu->kvm->stat.mmu_flooded;
3340 page_offset = offset;
3341 level = sp->role.level;
3343 if (!sp->role.cr4_pae) {
3344 page_offset <<= 1; /* 32->64 */
3346 * A 32-bit pde maps 4MB while the shadow pdes map
3347 * only 2MB. So we need to double the offset again
3348 * and zap two pdes instead of one.
3350 if (level == PT32_ROOT_LEVEL) {
3351 page_offset &= ~7; /* kill rounding error */
3355 quadrant = page_offset >> PAGE_SHIFT;
3356 page_offset &= ~PAGE_MASK;
3357 if (quadrant != sp->role.quadrant)
3361 spte = &sp->spt[page_offset / sizeof(*spte)];
3364 mmu_pte_write_zap_pte(vcpu, sp, spte);
3366 !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
3368 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
3369 if (!remote_flush && need_remote_flush(entry, *spte))
3370 remote_flush = true;
3374 mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
3375 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3376 trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
3377 spin_unlock(&vcpu->kvm->mmu_lock);
3378 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
3379 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
3380 vcpu->arch.update_pte.pfn = bad_pfn;
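/*
 * Illustrative example, not part of the original driver: the
 * misalignment test above flags a write that does not stay within one
 * guest pte.  With pte_size == 8:
 *
 *	4-byte write at offset 0x14: (0x14 ^ 0x17) & ~7 == 0x0 -> aligned
 *	4-byte write at offset 0x16: (0x16 ^ 0x19) & ~7 == 0x8 -> misaligned
 *
 * Writes shorter than 4 bytes are treated as misaligned as well
 * (misaligned |= bytes < 4), since they cannot be a whole guest pte.
 */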
3384 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
3389 if (vcpu->arch.mmu.direct_map)
3392 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
3394 spin_lock(&vcpu->kvm->mmu_lock);
3395 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3396 spin_unlock(&vcpu->kvm->mmu_lock);
3399 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
3401 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
3403 LIST_HEAD(invalid_list);
3405 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
3406 !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
3407 struct kvm_mmu_page *sp;
3409 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
3410 struct kvm_mmu_page, link);
3411 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
3412 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3413 ++vcpu->kvm->stat.mmu_recycled;
3417 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
3418 void *insn, int insn_len)
3421 enum emulation_result er;
3423 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
3432 r = mmu_topup_memory_caches(vcpu);
3436 er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
3441 case EMULATE_DO_MMIO:
3442 ++vcpu->stat.mmio_exits;
3452 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
3454 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
3456 vcpu->arch.mmu.invlpg(vcpu, gva);
3457 kvm_mmu_flush_tlb(vcpu);
3458 ++vcpu->stat.invlpg;
3460 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
3462 void kvm_enable_tdp(void)
3466 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
3468 void kvm_disable_tdp(void)
3470 tdp_enabled = false;
3472 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
3474 static void free_mmu_pages(struct kvm_vcpu *vcpu)
3476 free_page((unsigned long)vcpu->arch.mmu.pae_root);
3477 if (vcpu->arch.mmu.lm_root != NULL)
3478 free_page((unsigned long)vcpu->arch.mmu.lm_root);
3481 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
3489 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
3490 * Therefore we need to allocate shadow page tables in the first
3491 * 4GB of memory, which happens to fit the DMA32 zone.
3493 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
3497 vcpu->arch.mmu.pae_root = page_address(page);
3498 for (i = 0; i < 4; ++i)
3499 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
3504 int kvm_mmu_create(struct kvm_vcpu *vcpu)
3507 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3509 return alloc_mmu_pages(vcpu);
3512 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
3515 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3517 return init_kvm_mmu(vcpu);
3520 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
3522 struct kvm_mmu_page *sp;
3524 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
3528 if (!test_bit(slot, sp->slot_bitmap))
3532 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3533 if (!is_shadow_present_pte(pt[i]) ||
3534 !is_last_spte(pt[i], sp->role.level))
3537 if (is_large_pte(pt[i])) {
3538 drop_spte(kvm, &pt[i],
3539 shadow_trap_nonpresent_pte);
3545 if (is_writable_pte(pt[i]))
3546 update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
3549 kvm_flush_remote_tlbs(kvm);
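/*
 * Illustrative sketch, not part of the original driver: the core
 * operation of the loop above, clearing the writable bit of a
 * last-level spte so the next guest write faults and can be tracked.
 * The helper name is hypothetical.
 */
static inline u64 example_write_protect_spte(u64 spte)
{
	return spte & ~PT_WRITABLE_MASK;
}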
3552 void kvm_mmu_zap_all(struct kvm *kvm)
3554 struct kvm_mmu_page *sp, *node;
3555 LIST_HEAD(invalid_list);
3557 spin_lock(&kvm->mmu_lock);
3559 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
3560 if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
3563 kvm_mmu_commit_zap_page(kvm, &invalid_list);
3564 spin_unlock(&kvm->mmu_lock);
3567 static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
3568 struct list_head *invalid_list)
3570 struct kvm_mmu_page *page;
3572 page = container_of(kvm->arch.active_mmu_pages.prev,
3573 struct kvm_mmu_page, link);
3574 return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
3577 static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3580 struct kvm *kvm_freed = NULL;
3582 if (nr_to_scan == 0)
3585 raw_spin_lock(&kvm_lock);
3587 list_for_each_entry(kvm, &vm_list, vm_list) {
3588 int idx, freed_pages;
3589 LIST_HEAD(invalid_list);
3591 idx = srcu_read_lock(&kvm->srcu);
3592 spin_lock(&kvm->mmu_lock);
3593 if (!kvm_freed && nr_to_scan > 0 &&
3594 kvm->arch.n_used_mmu_pages > 0) {
3595 freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
3601 kvm_mmu_commit_zap_page(kvm, &invalid_list);
3602 spin_unlock(&kvm->mmu_lock);
3603 srcu_read_unlock(&kvm->srcu, idx);
3606 list_move_tail(&kvm_freed->vm_list, &vm_list);
3608 raw_spin_unlock(&kvm_lock);
3611 return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
3614 static struct shrinker mmu_shrinker = {
3615 .shrink = mmu_shrink,
3616 .seeks = DEFAULT_SEEKS * 10,
3619 static void mmu_destroy_caches(void)
3621 if (pte_chain_cache)
3622 kmem_cache_destroy(pte_chain_cache);
3623 if (rmap_desc_cache)
3624 kmem_cache_destroy(rmap_desc_cache);
3625 if (mmu_page_header_cache)
3626 kmem_cache_destroy(mmu_page_header_cache);
3629 int kvm_mmu_module_init(void)
3631 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
3632 sizeof(struct kvm_pte_chain),
3634 if (!pte_chain_cache)
3636 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
3637 sizeof(struct kvm_rmap_desc),
3639 if (!rmap_desc_cache)
3642 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
3643 sizeof(struct kvm_mmu_page),
3645 if (!mmu_page_header_cache)
3648 if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
3651 register_shrinker(&mmu_shrinker);
3656 mmu_destroy_caches();
3661 * Calculate mmu pages needed for kvm.
3663 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3666 unsigned int nr_mmu_pages;
3667 unsigned int nr_pages = 0;
3668 struct kvm_memslots *slots;
3670 slots = kvm_memslots(kvm);
3672 for (i = 0; i < slots->nmemslots; i++)
3673 nr_pages += slots->memslots[i].npages;
3675 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
3676 nr_mmu_pages = max(nr_mmu_pages,
3677 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
3679 return nr_mmu_pages;
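/*
 * Illustrative example, not part of the original driver: assuming the
 * usual KVM_PERMILLE_MMU_PAGES == 20, a guest with 1,048,576 memslot
 * pages (4GB of 4KB pages) gets
 *
 *	1048576 * 20 / 1000 == 20971
 *
 * shadow pages as its limit, bounded below by KVM_MIN_ALLOC_MMU_PAGES.
 */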
3682 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3685 if (len > buffer->len)
3690 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3695 ret = pv_mmu_peek_buffer(buffer, len);
3700 buffer->processed += len;
3704 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
3705 gpa_t addr, gpa_t value)
3710 if (!is_long_mode(vcpu) && !is_pae(vcpu))
3713 r = mmu_topup_memory_caches(vcpu);
3717 if (!emulator_write_phys(vcpu, addr, &value, bytes))
3723 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
3725 (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
3729 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
3731 spin_lock(&vcpu->kvm->mmu_lock);
3732 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
3733 spin_unlock(&vcpu->kvm->mmu_lock);
3737 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
3738 struct kvm_pv_mmu_op_buffer *buffer)
3740 struct kvm_mmu_op_header *header;
3742 header = pv_mmu_peek_buffer(buffer, sizeof *header);
3745 switch (header->op) {
3746 case KVM_MMU_OP_WRITE_PTE: {
3747 struct kvm_mmu_op_write_pte *wpte;
3749 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
3752 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
3755 case KVM_MMU_OP_FLUSH_TLB: {
3756 struct kvm_mmu_op_flush_tlb *ftlb;
3758 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
3761 return kvm_pv_mmu_flush_tlb(vcpu);
3763 case KVM_MMU_OP_RELEASE_PT: {
3764 struct kvm_mmu_op_release_pt *rpt;
3766 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3769 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3775 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3776 gpa_t addr, unsigned long *ret)
3779 struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3781 buffer->ptr = buffer->buf;
3782 buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3783 buffer->processed = 0;
3785 r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3789 while (buffer->len) {
3790 r = kvm_pv_mmu_op_one(vcpu, buffer);
3799 *ret = buffer->processed;
3803 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3805 struct kvm_shadow_walk_iterator iterator;
3808 spin_lock(&vcpu->kvm->mmu_lock);
3809 for_each_shadow_entry(vcpu, addr, iterator) {
3810 sptes[iterator.level-1] = *iterator.sptep;
3812 if (!is_shadow_present_pte(*iterator.sptep))
3815 spin_unlock(&vcpu->kvm->mmu_lock);
3819 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3821 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
3825 destroy_kvm_mmu(vcpu);
3826 free_mmu_pages(vcpu);
3827 mmu_free_memory_caches(vcpu);
3830 #ifdef CONFIG_KVM_MMU_AUDIT
3831 #include "mmu_audit.c"
3833 static void mmu_audit_disable(void) { }
3836 void kvm_mmu_module_exit(void)
3838 mmu_destroy_caches();
3839 percpu_counter_destroy(&kvm_total_used_mmu_pages);
3840 unregister_shrinker(&mmu_shrinker);
3841 mmu_audit_disable();