powerpc: Reduce PTE table memory wastage
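
On CONFIG_PPC_64K_PAGES kernels the last level of the Linux page table needs only a small slice of a 64K page, so handing a whole page to every PTE table wastes most of it. Instead, carve each page into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes and serve them from a per-mm cache (mm->context.pte_frag). The page's reference count is set to PTE_FRAG_NR when it is carved up, each freed fragment drops one reference, and the page itself goes back to the allocator only when the last fragment is released.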
[firefly-linux-kernel-4.4.55.git] / arch/powerpc/mm/pgtable_64.c
index 654258f165aeb9496ae96131c0f9b383927e8d20..a854096e102329608e4b1f33bd5f5886cfefe46b 100644
@@ -337,3 +337,162 @@ EXPORT_SYMBOL(__ioremap_at);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
 EXPORT_SYMBOL(__iounmap_at);
+
+#ifdef CONFIG_PPC_64K_PAGES
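+/*
+ * Hand out the next unused fragment of the current PTE page, if the
+ * per-mm cache holds one.
+ */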
+static pte_t *get_from_cache(struct mm_struct *mm)
+{
+       void *pte_frag, *ret;
+
+       spin_lock(&mm->page_table_lock);
+       ret = mm->context.pte_frag;
+       if (ret) {
+               pte_frag = ret + PTE_FRAG_SIZE;
+               /*
+                * If we have handed out all the fragments of this
+                * page, clear the cached pointer so the next request
+                * allocates a fresh page.
+                */
+               if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
+                       pte_frag = NULL;
+               mm->context.pte_frag = pte_frag;
+       }
+       spin_unlock(&mm->page_table_lock);
+       return (pte_t *)ret;
+}
+
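+/*
+ * No cached fragment was available: allocate a fresh page, hand back
+ * its first fragment and stash the remainder in mm->context.pte_frag.
+ */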
+static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
+{
+       void *ret = NULL;
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
+                                      __GFP_REPEAT | __GFP_ZERO);
+       if (!page)
+               return NULL;
+
+       ret = page_address(page);
+       spin_lock(&mm->page_table_lock);
+       /*
+        * If another thread raced in and set mm->context.pte_frag
+        * while we were allocating the page, return the new page
+        * with a single fragment count, i.e. the one reference
+        * alloc_page() gave us.  Otherwise claim the whole page:
+        * one reference per fragment, and cache the next fragment.
+        */
+       if (likely(!mm->context.pte_frag)) {
+               atomic_set(&page->_count, PTE_FRAG_NR);
+               mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+       }
+       spin_unlock(&mm->page_table_lock);
+
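+       /*
+        * pgtable_page_ctor() sets up the split page-table lock and
+        * page-table accounting for user tables; kernel page tables
+        * are covered by init_mm and skip it.
+        */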
+       if (!kernel)
+               pgtable_page_ctor(page);
+
+       return (pte_t *)ret;
+}
+
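+/*
+ * Allocate one PTE fragment: fast path from the per-mm cache,
+ * otherwise carve up a newly allocated page.
+ */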
+pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+{
+       pte_t *pte;
+
+       pte = get_from_cache(mm);
+       if (pte)
+               return pte;
+
+       return __alloc_for_cache(mm, kernel);
+}
+
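+/*
+ * Drop one fragment reference; the backing page goes back to the
+ * allocator only when its last fragment is freed.
+ */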
+void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
+{
+       struct page *page = virt_to_page(table);
+
+       if (put_page_testzero(page)) {
+               if (!kernel)
+                       pgtable_page_dtor(page);
+               free_hot_cold_page(page, 0);
+       }
+}
+
+#ifdef CONFIG_SMP
+static void page_table_free_rcu(void *table)
+{
+       struct page *page = virt_to_page(table);
+
+       if (put_page_testzero(page)) {
+               pgtable_page_dtor(page);
+               free_hot_cold_page(page, 0);
+       }
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+       unsigned long pgf = (unsigned long)table;
+
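+       /*
+        * Table addresses are aligned well beyond MAX_PGTABLE_INDEX_SIZE,
+        * so the low bits are free to carry the index size; it is
+        * masked off again in __tlb_remove_table().
+        */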
+       BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+       pgf |= shift;
+       tlb_remove_table(tlb, (void *)pgf);
+}
+
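+/*
+ * Called via the tlb_remove_table() batching machinery once no
+ * concurrent page-table walker can still hold a reference.
+ */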
+void __tlb_remove_table(void *_table)
+{
+       void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+       unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+       if (!shift) {
+               /* PTE page needs special handling */
+               page_table_free_rcu(table);
+       } else {
+               BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+               kmem_cache_free(PGT_CACHE(shift), table);
+       }
+}
+#else
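+/*
+ * Without CONFIG_SMP there are no concurrent walkers on other CPUs,
+ * so the table can be freed immediately.
+ */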
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+       if (!shift) {
+               /* PTE page needs special handling */
+               struct page *page = virt_to_page(table);
+
+               if (put_page_testzero(page)) {
+                       pgtable_page_dtor(page);
+                       free_hot_cold_page(page, 0);
+               }
+       } else {
+               BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+               kmem_cache_free(PGT_CACHE(shift), table);
+       }
+}
+#endif
+#endif /* CONFIG_PPC_64K_PAGES */
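
To see the fragment lifecycle outside the kernel, here is a minimal userspace sketch of the same scheme. It is an illustration only: frag_page, frag_cache, frag_alloc and frag_free are hypothetical stand-ins for the page allocator, the mm->context.pte_frag cache and put_page_testzero() above, and it assumes PTE_FRAG_NR = 16 and PTE_FRAG_SIZE = 4096, i.e. a 64K page carved into sixteen 4K fragments. Locking is omitted.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAG_NR   16
#define FRAG_SIZE 4096
#define PAGE_SZ   (FRAG_NR * FRAG_SIZE)

struct frag_page {
	char *mem;	/* backing "page" */
	int refcount;	/* one reference per outstanding fragment */
};

struct frag_cache {	/* plays the role of mm->context.pte_frag */
	struct frag_page *page;
	int next;	/* index of the next unused fragment */
};

/* get_from_cache() + __alloc_for_cache(), without the locking. */
static char *frag_alloc(struct frag_cache *cache, struct frag_page **owner)
{
	char *ret;

	if (!cache->page) {
		struct frag_page *p = calloc(1, sizeof(*p));

		assert(p);
		p->mem = calloc(1, PAGE_SZ);
		assert(p->mem);
		p->refcount = FRAG_NR;	/* mirrors atomic_set(&page->_count, PTE_FRAG_NR) */
		cache->page = p;
		cache->next = 0;
	}
	*owner = cache->page;
	ret = cache->page->mem + cache->next * FRAG_SIZE;
	if (++cache->next == FRAG_NR)	/* page fully carved up: drop it from the cache */
		cache->page = NULL;
	return ret;
}

/* page_table_free(): the page survives until its last fragment is freed. */
static void frag_free(struct frag_page *page)
{
	if (--page->refcount == 0) {
		free(page->mem);
		free(page);
	}
}

int main(void)
{
	struct frag_cache cache = { NULL, 0 };
	struct frag_page *owner[FRAG_NR];
	char *frag[FRAG_NR];
	int i;

	/* Sixteen PTE-table allocations are served by one 64K page... */
	for (i = 0; i < FRAG_NR; i++)
		frag[i] = frag_alloc(&cache, &owner[i]);
	assert(cache.page == NULL);
	assert(frag[1] - frag[0] == FRAG_SIZE);

	/* ...and the page is only released with its last fragment. */
	for (i = 0; i < FRAG_NR; i++)
		frag_free(owner[i]);

	printf("one %dK page served %d page-table fragments\n",
	       PAGE_SZ >> 10, FRAG_NR);
	return 0;
}

One difference from the kernel code: page_table_free() recovers the owning struct page with virt_to_page(table), which has no clean userspace equivalent, hence the explicit *owner out-parameter in this sketch.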