x86/mm/pageattr: Add last levels of error path
author Borislav Petkov <bp@suse.de>
Thu, 31 Oct 2013 16:25:06 +0000 (17:25 +0100)
committer Matt Fleming <matt.fleming@intel.com>
Sat, 2 Nov 2013 11:09:31 +0000 (11:09 +0000)
We try to free the pagetable pages once we've unmapped our portion of them: if clearing our PTEs leaves a PTE page completely empty, free it and clear the PMD entry pointing to it, and likewise one level up for an emptied PMD page. (A stand-alone sketch of this pattern follows the sign-offs below.)

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
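
The pattern the patch introduces cascades upward: clear the entries covering the requested range, and if that leaves a whole pagetable page empty, free that page and clear the entry pointing to it in the level above. Below is a minimal, self-contained user-space model of the PTE-level step; it is not kernel code, and the MODEL_ constants and model_ helpers are illustrative stand-ins for try_to_free_pte_page()/unmap_pte_range() in the diff.

/*
 * Stand-alone, user-space model of the freeing pattern (NOT kernel code):
 * clear the entries covering [start, end) in a "pte page", then free that
 * page and clear the parent slot once every entry in it is zero.  All
 * MODEL_ and model_ names are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PTRS_PER_PTE 512
#define MODEL_PAGE_SIZE    4096UL

struct model_pte_page {
        unsigned long entry[MODEL_PTRS_PER_PTE];
};

/* Mirrors try_to_free_pte_page(): free only if every entry is empty. */
static bool model_try_to_free_pte_page(struct model_pte_page *pte)
{
        for (int i = 0; i < MODEL_PTRS_PER_PTE; i++)
                if (pte->entry[i])
                        return false;

        free(pte);
        return true;
}

/*
 * Mirrors unmap_pte_range(): zero the entries for [start, end) and report
 * whether the whole pte page could be freed, so the caller can clear the
 * slot that pointed to it (pmd_clear() in the real patch).
 */
static bool model_unmap_pte_range(struct model_pte_page **pmd_slot,
                                  unsigned long start, unsigned long end)
{
        struct model_pte_page *pte = *pmd_slot;
        unsigned long idx = (start / MODEL_PAGE_SIZE) % MODEL_PTRS_PER_PTE;

        while (start < end) {
                pte->entry[idx++] = 0;          /* set_pte(pte, __pte(0)) */
                start += MODEL_PAGE_SIZE;
        }

        if (model_try_to_free_pte_page(pte)) {
                *pmd_slot = NULL;               /* pmd_clear(pmd) */
                return true;
        }
        return false;
}

int main(void)
{
        struct model_pte_page *pte = calloc(1, sizeof(*pte));

        if (!pte)
                return 1;

        /* "Map" two pages, then unmap both: the pte page empties and is freed. */
        pte->entry[0] = 1;
        pte->entry[1] = 1;
        model_unmap_pte_range(&pte, 0, 2 * MODEL_PAGE_SIZE);
        printf("pte page freed: %s\n", pte ? "no" : "yes");
        return 0;
}

The real patch repeats the same test one level up: when unmap_pte_range() manages to free its PTE page, __unmap_pmd_range() calls try_to_free_pmd_page() and clears the PUD entry on success.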
arch/x86/mm/pageattr.c

index 1cbdbbc35b47cbac0d082cdf7f3cbd76073fd3ee..db8ace29514f3a158bed721dad3cb4992e947c93 100644 (file)
@@ -666,7 +666,99 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        return 0;
 }
 
-#define unmap_pmd_range(pud, start, pre_end)           do {} while (0)
+static bool try_to_free_pte_page(pte_t *pte)
+{
+       int i;
+
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               if (!pte_none(pte[i]))
+                       return false;
+
+       free_page((unsigned long)pte);
+       return true;
+}
+
+static bool try_to_free_pmd_page(pmd_t *pmd)
+{
+       int i;
+
+       for (i = 0; i < PTRS_PER_PMD; i++)
+               if (!pmd_none(pmd[i]))
+                       return false;
+
+       free_page((unsigned long)pmd);
+       return true;
+}
+
+static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
+{
+       pte_t *pte = pte_offset_kernel(pmd, start);
+
+       while (start < end) {
+               set_pte(pte, __pte(0));
+
+               start += PAGE_SIZE;
+               pte++;
+       }
+
+       if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
+               pmd_clear(pmd);
+               return true;
+       }
+       return false;
+}
+
+static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
+                             unsigned long start, unsigned long end)
+{
+       if (unmap_pte_range(pmd, start, end))
+               if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+                       pud_clear(pud);
+}
+
+static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
+{
+       pmd_t *pmd = pmd_offset(pud, start);
+
+       /*
+        * Not on a 2MB page boundary?
+        */
+       if (start & (PMD_SIZE - 1)) {
+               unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+               unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+               __unmap_pmd_range(pud, pmd, start, pre_end);
+
+               start = pre_end;
+               pmd++;
+       }
+
+       /*
+        * Try to unmap in 2M chunks.
+        */
+       while (end - start >= PMD_SIZE) {
+               if (pmd_large(*pmd))
+                       pmd_clear(pmd);
+               else
+                       __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
+
+               start += PMD_SIZE;
+               pmd++;
+       }
+
+       /*
+        * 4K leftovers?
+        */
+       if (start < end)
+               return __unmap_pmd_range(pud, pmd, start, end);
+
+       /*
+        * Try again to free the PMD page if we haven't succeeded above.
+        */
+       if (!pud_none(*pud))
+               if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+                       pud_clear(pud);
+}
 
 static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
 {