/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include "vmx.h"
#include "kvm.h"

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#define ASSERT(x)						\
	if (!(x)) {						\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
		       __FILE__, __LINE__, #x);			\
	}

#define PT64_ENT_PER_PAGE 512
#define PT32_ENT_PER_PAGE 1024

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
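
/*
 * On PSE-36 capable processors, bits 13..16 of a 4MB page directory
 * entry hold physical address bits 32..35; PT32_DIR_PSE36_MASK above
 * extracts that field from a guest PDE.
 */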

#define PT32_PTE_COPY_MASK \
	(PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)

#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)

#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))

#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
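
/*
 * Worked example of the layout above: with PT_FIRST_AVAIL_BITS_SHIFT == 9,
 * the guest's writable bit (bit 1) is preserved at shadow bit 10 and the
 * guest's user bit (bit 2) at shadow bit 11, i.e. shifted up by
 * PT_SHADOW_BITS_OFFSET == 9, so the original guest access rights remain
 * recoverable after the shadow pte itself has been write-protected.
 */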

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
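
/*
 * Example (PAGE_SHIFT == 12): PT64_INDEX(addr, 4) extracts bits 47:39 of
 * the address and PT64_INDEX(addr, 1) bits 20:12; PT32_INDEX(addr, 2)
 * extracts bits 31:22 and PT32_INDEX(addr, 1) bits 21:12.
 */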

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & CR0_WP_MASK;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}
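
/*
 * is_rmap_pte() is the predicate deciding which shadow ptes participate in
 * the reverse map: only present, writable sptes are entered; rmap_add()
 * and rmap_remove() below keep page->private up to date as such sptes are
 * created and destroyed.
 */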

/*
 * Reverse mapping data structures:
 *
 * If page->private bit zero is zero, then page->private points to the
 * shadow page table entry that points to page_address(page).
 *
 * If page->private bit zero is one, (then page->private & ~1) points
 * to a struct kvm_rmap_desc containing more mappings.
 */
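
/*
 * Illustrative sketch only, not part of the driver proper: a hypothetical
 * helper (rmap_walk_example) showing how the encoding described above is
 * consumed.  The real code open-codes this walk in rmap_remove() below.
 */
static inline void rmap_walk_example(struct page *page,
				     void (*fn)(u64 *spte))
{
	struct kvm_rmap_desc *desc;
	int i;

	if (!page->private)
		return;				/* no reverse mappings */
	if (!(page->private & 1)) {
		fn((u64 *)page->private);	/* single spte stored inline */
		return;
	}
	desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
	while (desc) {				/* chain of descriptors */
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
			fn(desc->shadow_ptes[i]);
		desc = desc->more;
	}
}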

static void rmap_add(struct kvm *kvm, u64 *spte)
{
	struct page *page;
	struct kvm_rmap_desc *desc;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page->private) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		page->private = (unsigned long)spte;
	} else if (!(page->private & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = kzalloc(sizeof *desc, GFP_NOWAIT);
		if (!desc)
			BUG(); /* FIXME: return error */
		desc->shadow_ptes[0] = (u64 *)page->private;
		desc->shadow_ptes[1] = spte;
		page->private = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = kzalloc(sizeof *desc->more, GFP_NOWAIT);
			if (!desc->more)
				BUG(); /* FIXME: return error */
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(struct page *page,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = 0;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		page->private = (unsigned long)desc->shadow_ptes[0];
	else if (prev_desc)
		prev_desc->more = desc->more;
	else
		page->private = (unsigned long)desc->more | 1;
	kfree(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct page *page;
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (!page->private) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(page->private & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)page->private != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		page->private = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(page, desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
{
	struct kvm_mmu_page *page_head = page_header(page_hpa);

	list_del(&page_head->link);
	page_head->page_hpa = page_hpa;
	list_add(&page_head->link, &vcpu->free_pages);
}

static int is_empty_shadow_page(hpa_t page_hpa)
{
	u32 *pos;
	u32 *end;

	for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u32);
	     pos != end; pos++)
		if (*pos != 0)
			return 0;
	return 1;
}

static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (list_empty(&vcpu->free_pages))
		return INVALID_PAGE;

	page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
	list_del(&page->link);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->page_hpa));
	page->slot_bitmap = 0;
	page->global = 1;
	page->parent_pte = parent_pte;
	return page->page_hpa;
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	hpa_t hpa = gpa_to_hpa(vcpu, gpa);

	return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_memory_slot *slot;
	struct page *page;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!slot)
		return gpa | HPA_ERR_MASK;
	page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
		| (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu, gpa);
}

static void release_pt_page_64(struct kvm_vcpu *vcpu, hpa_t page_hpa,
			       int level)
{
	u64 *pos;
	u64 *end;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(page_hpa));
	ASSERT(level <= PT64_ROOT_LEVEL && level > 0);

	for (pos = __va(page_hpa), end = pos + PT64_ENT_PER_PAGE;
	     pos != end; pos++) {
		u64 current_ent = *pos;

		if (is_present_pte(current_ent)) {
			if (level != 1)
				release_pt_page_64(vcpu,
						   current_ent &
						   PT64_BASE_ADDR_MASK,
						   level - 1);
			else
				rmap_remove(vcpu->kvm, pos);
		}
		*pos = 0;
	}
	kvm_mmu_free_page(vcpu, page_hpa);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
				       PT_USER_MASK;
			rmap_add(vcpu->kvm, &table[index]);
			return 0;
		}

		if (table[index] == 0) {
			hpa_t new_table = kvm_mmu_alloc_page(vcpu,
							     &table[index]);

			if (!VALID_PAGE(new_table)) {
				pgprintk("nonpaging_map: ENOMEM\n");
				return -ENOMEM;
			}

			if (level == PT32E_ROOT_LEVEL)
				table[index] = new_table | PT_PRESENT_MASK;
			else
				table[index] = new_table | PT_PRESENT_MASK |
					       PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;

	ASSERT(vcpu);
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(VALID_PAGE(root));
		release_pt_page_64(vcpu, root, PT64_ROOT_LEVEL);
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(VALID_PAGE(root));
		root &= PT64_BASE_ADDR_MASK;
		release_pt_page_64(vcpu, root, PT32E_ROOT_LEVEL - 1);
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;

	ASSERT(vcpu);
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		root = kvm_mmu_alloc_page(vcpu, NULL);
		vcpu->mmu.root_hpa = root;
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		root = kvm_mmu_alloc_page(vcpu, NULL);
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static void nonpaging_flush(struct kvm_vcpu *vcpu)
{
	hpa_t root = vcpu->mmu.root_hpa;

	++kvm_stat.tlb_flush;
	pgprintk("nonpaging_flush\n");
	mmu_free_roots(vcpu);
	mmu_alloc_roots(vcpu);
	kvm_arch_ops->set_cr3(vcpu, root);
	kvm_arch_ops->tlb_flush(vcpu);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	int ret;
	gpa_t addr = gva;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	for (;;) {
		hpa_t paddr;

		paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

		if (is_error_hpa(paddr))
			return 1;

		ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
		if (ret) {
			nonpaging_flush(vcpu);
			continue;
		}
		break;
	}
	return ret;
}

static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
{
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->inval_page = nonpaging_inval_page;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->root_level = PT32E_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
	return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page, *npage;

	list_for_each_entry_safe(page, npage, &vcpu->kvm->active_mmu_pages,
				 link) {
		if (page->global)
			continue;

		if (!page->parent_pte)
			continue;

		*page->parent_pte = 0;
		release_pt_page_64(vcpu, page->page_hpa, 1);
	}
	++kvm_stat.tlb_flush;
	kvm_arch_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	kvm_mmu_flush_tlb(vcpu);
}

static void mark_pagetable_nonglobal(void *shadow_pte)
{
	page_header(__pa(shadow_pte))->global = 0;
}

static inline void set_pte_common(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  int dirty,
				  u64 access_bits)
{
	hpa_t paddr;

	*shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	*shadow_pte |= access_bits;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	if (!(*shadow_pte & PT_GLOBAL_MASK))
		mark_pagetable_nonglobal(shadow_pte);

	if (is_error_hpa(paddr)) {
		*shadow_pte |= gaddr;
		*shadow_pte |= PT_SHADOW_IO_MARK;
		*shadow_pte &= ~PT_PRESENT_MASK;
	} else {
		*shadow_pte |= paddr;
		page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
		rmap_add(vcpu->kvm, shadow_pte);
	}
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

static inline int fix_read_pf(u64 *shadow_ent)
{
	if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
	    !(*shadow_ent & PT_USER_MASK)) {
		/*
		 * If supervisor write protect is disabled, we shadow kernel
		 * pages as user pages so we can trap the write access.
		 */
		*shadow_ent |= PT_USER_MASK;
		*shadow_ent &= ~PT_WRITABLE_MASK;

		return 1;
	}
	return 0;
}

static int may_access(u64 pte, int write, int user)
{
	if (user && !(pte & PT_USER_MASK))
		return 0;
	if (write && !(pte & PT_WRITABLE_MASK))
		return 0;
	return 1;
}

/*
 * Remove a shadow pte.
 */
static void paging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
{
	hpa_t page_addr = vcpu->mmu.root_hpa;
	int level = vcpu->mmu.shadow_root_level;

	++kvm_stat.invlpg;

	for (; ; level--) {
		u32 index = PT64_INDEX(addr, level);
		u64 *table = __va(page_addr);

		if (level == PT_PAGE_TABLE_LEVEL) {
			rmap_remove(vcpu->kvm, &table[index]);
			table[index] = 0;
			return;
		}

		if (!is_present_pte(table[index]))
			return;

		page_addr = table[index] & PT64_BASE_ADDR_MASK;

		if (level == PT_DIRECTORY_LEVEL &&
		    (table[index] & PT_SHADOW_PS_MARK)) {
			table[index] = 0;
			release_pt_page_64(vcpu, page_addr, PT_PAGE_TABLE_LEVEL);

			kvm_arch_ops->tlb_flush(vcpu);
			return;
		}
	}
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->inval_page = paging_inval_page;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
			      (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->inval_page = paging_inval_page;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	mmu_alloc_roots(vcpu);
	ASSERT(VALID_PAGE(context->root_hpa));
	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
			      (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	while (!list_empty(&vcpu->free_pages)) {
		struct kvm_mmu_page *page;

		page = list_entry(vcpu->free_pages.next,
				  struct kvm_mmu_page, link);
		list_del(&page->link);
		__free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
		page->page_hpa = INVALID_PAGE;
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
		struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];

		INIT_LIST_HEAD(&page_header->link);
		if ((page = alloc_page(GFP_KERNEL)) == NULL)
			goto error_1;
		page->private = (unsigned long)page_header;
		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
		list_add(&page_header->link, &vcpu->free_pages);
	}

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
	ASSERT(list_empty(&vcpu->free_pages));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
	ASSERT(!list_empty(&vcpu->free_pages));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		u64 *pt;
		int i;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = __va(page->page_hpa);
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			if (pt[i] & PT_WRITABLE_MASK) {
				rmap_remove(kvm, &pt[i]);
				pt[i] &= ~PT_WRITABLE_MASK;
			}
	}
}