xen: Speed up set_phys_to_machine() by using read-only mappings
author: Juergen Gross <jgross@suse.com>
        Fri, 28 Nov 2014 10:53:59 +0000 (11:53 +0100)
committer: David Vrabel <david.vrabel@citrix.com>
        Thu, 4 Dec 2014 14:09:20 +0000 (14:09 +0000)
Instead of checking at each call of set_phys_to_machine() whether a
new p2m page has to be allocated due to writing an entry in a large
invalid or identity area, just map those areas read only and react
to a page fault on write by allocating the new page.

This change will make the common path with no allocation much
faster as it only requires a single write of the new mfn instead
of walking the address translation tables and checking for the
special cases.

Suggested-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
arch/x86/xen/p2m.c

index 7d844739e513b02b8b53d099069adec92722a4a2..8b5db51be4dd5b0cecb164336412d356a75498be 100644 (file)
@@ -70,6 +70,7 @@
 
 #include <asm/cache.h>
 #include <asm/setup.h>
+#include <asm/uaccess.h>
 
 #include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
@@ -316,9 +317,9 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
        paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                set_pte(p2m_missing_pte + i,
-                       pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL));
+                       pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
                set_pte(p2m_identity_pte + i,
-                       pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL));
+                       pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
        }
 
        for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
@@ -365,7 +366,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
                                p2m_missing : p2m_identity;
                        ptep = populate_extra_pte((unsigned long)(p2m + pfn));
                        set_pte(ptep,
-                               pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
+                               pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
                        continue;
                }
 
@@ -624,6 +625,9 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
                return true;
        }
 
+       if (likely(!__put_user(mfn, xen_p2m_addr + pfn)))
+               return true;
+
        ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
        BUG_ON(!ptep || level != PG_LEVEL_4K);
 
@@ -633,9 +637,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
        if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
                return mfn == IDENTITY_FRAME(pfn);
 
-       xen_p2m_addr[pfn] = mfn;
-
-       return true;
+       return false;
 }
 
 bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)