hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages
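
Replace the temporary, i_mmap_mutex-protected page_list (linked through
page->lru) in __unmap_hugepage_range() with the generic mmu_gather
batching already used by the normal unmap path. While here, stop
encoding VM_FAULT_* values in ERR_PTR() from alloc_huge_page() (return
-ENOMEM/-ENOSPC and let callers translate them back to VM_FAULT_OOM or
VM_FAULT_SIGBUS), replace open-coded "h - hstates" arithmetic with
hstate_index(), and rename max_hstate to hugetlb_max_hstate.

The caller side follows the usual mmu_gather lifecycle. A minimal
sketch of the pattern (hypothetical wrapper name; the three-argument
tlb_gather_mmu() and the tlb_finish_mmu(tlb, start, end) signatures are
those used by this kernel series):

	/* Sketch: batch hugetlb page teardown through mmu_gather. */
	static void sketch_unmap(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 struct page *ref_page)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, vma->vm_mm, 0);	/* 0: not a full-mm teardown */
		__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
		tlb_finish_mmu(&tlb, start, end);	/* flush TLB, free batched pages */
	}

Inside __unmap_hugepage_range(), __tlb_remove_page() returns 0 once the
gather runs out of room; the loop then breaks out, drops
page_table_lock, calls tlb_flush_mmu() to invalidate and free the
batch, and restarts from the next address at the "again:" label, so the
expensive flush and page freeing never happen under the lock.
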
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e198831276a3eab77b4a89fc0e1457a5a45d025d..e54b695336f94f91f3b847dad9e728f58c5fa58b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,8 +24,9 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <linux/io.h>
+#include <asm/tlb.h>
 
+#include <linux/io.h>
 #include <linux/hugetlb.h>
 #include <linux/node.h>
 #include "internal.h"
@@ -34,7 +35,7 @@ const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 
-static int max_hstate;
+static int hugetlb_max_hstate;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
 
@@ -46,7 +47,7 @@ static unsigned long __initdata default_hstate_max_huge_pages;
 static unsigned long __initdata default_hstate_size;
 
 #define for_each_hstate(h) \
-       for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
+       for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -1123,10 +1124,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
-               return ERR_PTR(-VM_FAULT_OOM);
+               return ERR_PTR(-ENOMEM);
        if (chg)
                if (hugepage_subpool_get_pages(spool, chg))
-                       return ERR_PTR(-VM_FAULT_SIGBUS);
+                       return ERR_PTR(-ENOSPC);
 
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
@@ -1136,7 +1137,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
                        hugepage_subpool_put_pages(spool, chg);
-                       return ERR_PTR(-VM_FAULT_SIGBUS);
+                       return ERR_PTR(-ENOSPC);
                }
        }
 
@@ -1646,7 +1647,7 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
                                    struct attribute_group *hstate_attr_group)
 {
        int retval;
-       int hi = h - hstates;
+       int hi = hstate_index(h);
 
        hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
        if (!hstate_kobjs[hi])
@@ -1741,11 +1742,13 @@ void hugetlb_unregister_node(struct node *node)
        if (!nhs->hugepages_kobj)
                return;         /* no hstate attributes */
 
-       for_each_hstate(h)
-               if (nhs->hstate_kobjs[h - hstates]) {
-                       kobject_put(nhs->hstate_kobjs[h - hstates]);
-                       nhs->hstate_kobjs[h - hstates] = NULL;
+       for_each_hstate(h) {
+               int idx = hstate_index(h);
+               if (nhs->hstate_kobjs[idx]) {
+                       kobject_put(nhs->hstate_kobjs[idx]);
+                       nhs->hstate_kobjs[idx] = NULL;
                }
+       }
 
        kobject_put(nhs->hugepages_kobj);
        nhs->hugepages_kobj = NULL;
@@ -1848,7 +1851,7 @@ static void __exit hugetlb_exit(void)
        hugetlb_unregister_all_nodes();
 
        for_each_hstate(h) {
-               kobject_put(hstate_kobjs[h - hstates]);
+               kobject_put(hstate_kobjs[hstate_index(h)]);
        }
 
        kobject_put(hugepages_kobj);
@@ -1869,7 +1872,7 @@ static int __init hugetlb_init(void)
                if (!size_to_hstate(default_hstate_size))
                        hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
        }
-       default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
+       default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
        if (default_hstate_max_huge_pages)
                default_hstate.max_huge_pages = default_hstate_max_huge_pages;
 
@@ -1897,9 +1900,9 @@ void __init hugetlb_add_hstate(unsigned order)
                printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
                return;
        }
-       BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
+       BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
        BUG_ON(order == 0);
-       h = &hstates[max_hstate++];
+       h = &hstates[hugetlb_max_hstate++];
        h->order = order;
        h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
        h->nr_huge_pages = 0;
@@ -1920,10 +1923,10 @@ static int __init hugetlb_nrpages_setup(char *s)
        static unsigned long *last_mhp;
 
        /*
-        * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
+        * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
         * so this hugepages= parameter goes to the "default hstate".
         */
-       if (!max_hstate)
+       if (!hugetlb_max_hstate)
                mhp = &default_hstate_max_huge_pages;
        else
                mhp = &parsed_hstate->max_huge_pages;
@@ -1942,7 +1945,7 @@ static int __init hugetlb_nrpages_setup(char *s)
         * But we need to allocate >= MAX_ORDER hstates here early to still
         * use the bootmem allocator.
         */
-       if (max_hstate && parsed_hstate->order >= MAX_ORDER)
+       if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
                hugetlb_hstate_alloc_pages(parsed_hstate);
 
        last_mhp = mhp;
@@ -2308,30 +2311,26 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
                return 0;
 }
 
-void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-                           unsigned long end, struct page *ref_page)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+                           unsigned long start, unsigned long end,
+                           struct page *ref_page)
 {
+       int force_flush = 0;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
        pte_t pte;
        struct page *page;
-       struct page *tmp;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
 
-       /*
-        * A page gathering list, protected by per file i_mmap_mutex. The
-        * lock is used to avoid list corruption from multiple unmapping
-        * of the same page since we are using page->lru.
-        */
-       LIST_HEAD(page_list);
-
        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~huge_page_mask(h));
        BUG_ON(end & ~huge_page_mask(h));
 
+       tlb_start_vma(tlb, vma);
        mmu_notifier_invalidate_range_start(mm, start, end);
+again:
        spin_lock(&mm->page_table_lock);
        for (address = start; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
@@ -2370,30 +2369,45 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                }
 
                pte = huge_ptep_get_and_clear(mm, address, ptep);
+               tlb_remove_tlb_entry(tlb, ptep, address);
                if (pte_dirty(pte))
                        set_page_dirty(page);
-               list_add(&page->lru, &page_list);
 
+               page_remove_rmap(page);
+               force_flush = !__tlb_remove_page(tlb, page);
+               if (force_flush)
+                       break;
                /* Bail out after unmapping reference page if supplied */
                if (ref_page)
                        break;
        }
-       flush_tlb_range(vma, start, end);
        spin_unlock(&mm->page_table_lock);
-       mmu_notifier_invalidate_range_end(mm, start, end);
-       list_for_each_entry_safe(page, tmp, &page_list, lru) {
-               page_remove_rmap(page);
-               list_del(&page->lru);
-               put_page(page);
+       /*
+        * mmu_gather ran out of room to batch pages, we break out of
+        * the PTE lock to avoid doing the potentially expensive TLB invalidate
+        * and page-free while holding it.
+        */
+       if (force_flush) {
+               force_flush = 0;
+               tlb_flush_mmu(tlb);
+               if (address < end && !ref_page)
+                       goto again;
        }
+       mmu_notifier_invalidate_range_end(mm, start, end);
+       tlb_end_vma(tlb, vma);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
-       __unmap_hugepage_range(vma, start, end, ref_page);
-       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+       struct mm_struct *mm;
+       struct mmu_gather tlb;
+
+       mm = vma->vm_mm;
+
+       tlb_gather_mmu(&tlb, mm, 0);
+       __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
+       tlb_finish_mmu(&tlb, start, end);
 }
 
 /*
@@ -2438,9 +2452,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                 * from the time of fork. This would look like data corruption
                 */
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
-                       __unmap_hugepage_range(iter_vma,
-                               address, address + huge_page_size(h),
-                               page);
+                       unmap_hugepage_range(iter_vma, address,
+                                            address + huge_page_size(h), page);
        }
        mutex_unlock(&mapping->i_mmap_mutex);
 
@@ -2496,6 +2509,7 @@ retry_avoidcopy:
        new_page = alloc_huge_page(vma, address, outside_reserve);
 
        if (IS_ERR(new_page)) {
+               long err = PTR_ERR(new_page);
                page_cache_release(old_page);
 
                /*
@@ -2524,7 +2538,10 @@ retry_avoidcopy:
 
                /* Caller expects lock to be held */
                spin_lock(&mm->page_table_lock);
-               return -PTR_ERR(new_page);
+               if (err == -ENOMEM)
+                       return VM_FAULT_OOM;
+               else
+                       return VM_FAULT_SIGBUS;
        }
 
        /*
@@ -2642,7 +2659,11 @@ retry:
                        goto out;
                page = alloc_huge_page(vma, address, 0);
                if (IS_ERR(page)) {
-                       ret = -PTR_ERR(page);
+                       ret = PTR_ERR(page);
+                       if (ret == -ENOMEM)
+                               ret = VM_FAULT_OOM;
+                       else
+                               ret = VM_FAULT_SIGBUS;
                        goto out;
                }
                clear_huge_page(page, address, pages_per_huge_page(h));
@@ -2679,7 +2700,7 @@ retry:
                 */
                if (unlikely(PageHWPoison(page))) {
                        ret = VM_FAULT_HWPOISON |
-                             VM_FAULT_SET_HINDEX(h - hstates);
+                               VM_FAULT_SET_HINDEX(hstate_index(h));
                        goto backout_unlocked;
                }
        }
@@ -2752,7 +2773,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
-                              VM_FAULT_SET_HINDEX(h - hstates);
+                               VM_FAULT_SET_HINDEX(hstate_index(h));
        }
 
        ptep = huge_pte_alloc(mm, address, huge_page_size(h));