/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
	#define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
	#define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
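/*
 * How this template is instantiated (sketch): mmu.c includes this header
 * once per guest pte size, roughly as below, so every FNAME() function
 * exists twice, e.g. paging64_walk_addr() and paging32_walk_addr():
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */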
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
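/*
 * Example (64-bit gptes): at the 4K level PT_LVL_ADDR_MASK(1) keeps bits
 * 51:12, so the result is simply the gfn stored in the pte; at the 2M level
 * the mask keeps bits 51:21, so the result is the gfn of the first 4K page
 * covered by the large page.
 */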
static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_GUEST_DIRTY_MASK)
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}
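/*
 * Worked example of the shift above: PT_DIRTY_SHIFT is 6 and
 * PT_WRITABLE_SHIFT is 1, so shifting the gpte right by 5 moves the dirty
 * bit into the writable (ACC_WRITE_MASK) position.  A clean gpte therefore
 * loses write permission here; the first guest write faults, which lets KVM
 * set the dirty bit before write access is granted again.
 */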
static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
	int bit7;

	bit7 = (gpte >> 7) & 1;
	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
}
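/*
 * Note: bit 7 of a gpte is the PS (large page) bit at the directory levels,
 * and the set of reserved bits differs between large and non-large entries,
 * which is why rsvd_bits_mask is indexed by bit 7 as well as by level.
 */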
static inline int FNAME(is_present_gpte)(unsigned long pte)
{
	return is_present_gpte(pte);
}
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
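/*
 * Why a cmpxchg: another vcpu (or the guest itself) may update the same
 * gpte concurrently, so accessed/dirty bits must be set atomically, just as
 * a hardware page walk would set them.  A non-zero return value means the
 * gpte changed under us; the guest walk is then retried.
 */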
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 u64 gpte)
{
	if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* if the accessed bit is not supported, prefetch non-accessed gptes too */
	if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}
static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	access &= ~(gpte >> PT64_NX_SHIFT);

	return access;
}
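/*
 * Sketch of the NX trick above: ACC_EXEC_MASK is bit 0 and the NX bit is
 * gpte bit 63 (PT64_NX_SHIFT), so "gpte >> PT64_NX_SHIFT" is 0 or 1 and the
 * mask clears exec permission exactly when NX is set.  32-bit non-PAE gptes
 * have no NX bit and their upper bits are zero, so exec is never stripped.
 */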
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_GUEST_DIRTY_MASK)
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
		    !(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}
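/*
 * This mirrors what a hardware walker does: the accessed bit is set in the
 * entry at every level used by the translation, while the dirty bit is set
 * only at the final level and only for write faults.  If a cmpxchg loses a
 * race the whole guest walk is restarted by the caller.
 */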
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	pt_element_t __user *uninitialized_var(ptep_user);
	unsigned index, pt_access, pte_access, accessed_dirty;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte = mmu->get_cr3(vcpu);

	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}

	walker->max_level = walker->level;
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	accessed_dirty = PT_GUEST_ACCESSED_MASK;
	pt_access = pte_access = ACC_ALL;
	do {
		gfn_t real_gfn;
		unsigned long host_addr;

		pt_access &= pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset = index * sizeof(pt_element_t);
		pte_gpa = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK);
		if (unlikely(real_gfn == UNMAPPED_GVA))
			goto error;
		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte,
						     walker->level))) {
			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		accessed_dirty &= pte;
		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));
	if (unlikely(permission_fault(mmu, pte_access, access))) {
		errcode |= PFERR_PRESENT_MASK;
		goto error;
	}

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;
	if (!write_fault)
		FNAME(protect_clean_gpte)(&pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support accessed_dirty will be
		 * always clear.
		 */
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
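/*
 * Worked example of the fold above: accessed_dirty starts as the accessed
 * bit (bit 5) and is ANDed with every gpte in the walk, so it stays set only
 * if all levels were already accessed.  PT_GUEST_DIRTY_SHIFT minus
 * PT_GUEST_ACCESSED_SHIFT is 1, so on a write fault the leaf's dirty bit
 * (bit 6) is shifted into the same position and ANDed in as well.  The check
 * below therefore takes the slow update path whenever some accessed bit, or
 * the dirty bit on a write, still needs to be set.
 */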
	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}
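/*
 * Summary: on success the walker returns 1 with walker->gfn holding the
 * translated frame and pte_access/pt_access the effective permissions; on
 * failure it returns 0 with walker->fault filled in, ready to be injected
 * into the guest as a #PF (or as a nested fault when walking a nested mmu).
 */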
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
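/*
 * walk_addr() walks the vcpu's primary paging context, while
 * walk_addr_nested() walks arch.nested_mmu, the context used to translate
 * addresses on behalf of a nested guest; which mmu was walked is also what
 * sets walker->fault.nested_page_fault above.
 */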
static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	unsigned pte_access;
	gfn_t gfn;
	pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	FNAME(protect_clean_gpte)(&pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
		return false;

	/*
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
		     gfn, pfn, true, true);

	return true;
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
					  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
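/*
 * Rationale: the guest walk runs without mmu_lock, so a gpte may be modified
 * between the walk and the moment the shadow entry is installed;
 * gpte_changed() re-reads it atomically to detect that.  At the last level
 * it reads a whole aligned group of PTE_PREFETCH_NUM gptes into
 * gw->prefetch_ptes so that FNAME(pte_prefetch) can reuse them later.
 */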
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			struct guest_walker *gw,
			int write_fault, int hlevel,
			pfn_t pfn, bool map_writable, bool prefault)
{
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, emulate = 0;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;
	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}
	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
		     it.level, gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return emulate;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
	return 0;
}
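/*
 * Shape of FNAME(fetch): the first loop covers the shadow levels that have a
 * corresponding guest level, creating indirect shadow pages for each guest
 * table (which write-protects them) and re-checking the gpte after write
 * protection; the second loop covers levels below the guest mapping level
 * (a guest large page mapped with smaller host pages) using direct shadow
 * pages; mmu_set_spte() then installs the leaf spte.
 */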
/*
 * To see whether the mapped gfn can write its own page table within the
 * current mapping.
 *
 * This is a helper for FNAME(page_fault).  If the guest uses a large page to
 * map a writable gfn that is itself in use as a page table, we force a
 * small-page mapping: establishing the shadow page table creates a shadow
 * page for that gfn anyway, which prevents the large mapping from being
 * used.  Detecting this early avoids unnecessary page faults and emulation.
 *
 * @write_fault_to_shadow_pgtable will be set to true if the faulting gfn is
 * currently being used as a page table.
 *
 * Note: the PDPT is not checked for PAE-32 bit guests.  That is fine, since
 * the PDPT is always shadowed, which means we can never use a large page to
 * map the gfn holding the PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, int user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}
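/*
 * The xor/mask trick above: gfn ^ table_gfn is zero iff the faulting gfn is
 * one of the page-table frames used by the walk (so the write definitely
 * hits a page table), and masking the xor with the large-page mask tells
 * whether the two frames merely share the same large-page region (in which
 * case the mapping must be forced down to small pages).
 */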
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	int force_pt_level;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, addr, error_code,
					   mmu_is_nested(vcpu));
		if (likely(r != RET_MMIO_PF_INVALID))
			return r;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return 0;
	}
	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
		   || is_self_change_mapping;
	else
		force_pt_level = 1;
	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	     !is_write_protection(vcpu) && !user_fault &&
	      !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page,
		 * so that the kernel can write to it when cr0.wp=0,
		 * then we should prevent the kernel from executing it
		 * if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	make_mmu_pages_available(vcpu);
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
			 level, pfn, map_writable, prefault);
	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
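/*
 * Why the quadrant matters: with 32-bit gptes a guest page table holds 1024
 * 4-byte entries, but a shadow page holds only 512 8-byte sptes, so each
 * guest table is shadowed by more than one shadow page.  role.quadrant
 * selects which part, and the computation above converts it back into the
 * gpa of the first guest pte covered by this shadow page.
 */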
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check return value here, rmap_can_add() can
	 * help us to skip pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
						  sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}
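/*
 * Note on invlpg: besides zapping the leaf spte for gva (and flushing remote
 * TLBs if a present spte was removed), the code re-reads the guest pte and,
 * memory caches permitting, immediately re-prefetches the new mapping via
 * FNAME(update_pte), since a guest typically executes invlpg right after
 * rewriting the pte.
 */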
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   Even though the guest is responsible for flushing stale translations, we
 *   should still flush TLBs whenever a spte is dropped here.  If we don't,
 *   kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start may conclude the page is no
 *   longer mapped by the guest and skip the flush, allowing the guest to
 *   keep accessing the freed page through a stale TLB entry.
 *   We increase kvm->tlbs_dirty to delay the TLB flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gfn_t first_pte_gpa;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(vcpu, gpte);
		FNAME(protect_clean_gpte)(&pte_access, gpte);

		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
				   &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}
}
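/*
 * Flow of sync_page(): for every spte in this unsync shadow page, re-read
 * the guest pte; drop the spte (bumping kvm->tlbs_dirty so the flush can be
 * batched) if the gpte became invalid or now points at a different gfn than
 * the cached sp->gfns[i]; otherwise recompute the permissions and refresh
 * the spte in place via set_spte() without touching the pfn.
 */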
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT