projects
/
firefly-linux-kernel-4.4.55.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
KVM: MMU: fast invalidate all mmio sptes
[firefly-linux-kernel-4.4.55.git]
/
arch
/
x86
/
kvm
/
paging_tmpl.h
diff --git
a/arch/x86/kvm/paging_tmpl.h
b/arch/x86/kvm/paging_tmpl.h
index da20860b457a4c33bc7c17d6cece102b8b8f7216..7769699d48a80caac0e1d8402280ab1e15b99bed 100644
(file)
--- a/
arch/x86/kvm/paging_tmpl.h
+++ b/
arch/x86/kvm/paging_tmpl.h
@@ -552,9 +552,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
-	if (unlikely(error_code & PFERR_RSVD_MASK))
-		return handle_mmio_page_fault(vcpu, addr, error_code,
-					      mmu_is_nested(vcpu));
+	if (unlikely(error_code & PFERR_RSVD_MASK)) {
+		r = handle_mmio_page_fault(vcpu, addr, error_code,
+					   mmu_is_nested(vcpu));
+		if (likely(r != RET_MMIO_PF_INVALID))
+			return r;
+	};
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
@@ -792,7 +795,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		pte_access &= gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
- if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
+ if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
+ &nr_present))
 			continue;

 		if (gfn != sp->gfns[i]) {