Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/cooloney...
[firefly-linux-kernel-4.4.55.git] / mm/huge_memory.c
index 7de1bf85f6833422e16161445b71e328fad2e1f6..82166bf974e14262ecfb064ea7c173d006d3ab98 100644 (file)
@@ -130,8 +130,14 @@ static int set_recommended_min_free_kbytes(void)
                              (unsigned long) nr_free_buffer_pages() / 20);
        recommended_min <<= (PAGE_SHIFT-10);
 
-       if (recommended_min > min_free_kbytes)
+       if (recommended_min > min_free_kbytes) {
+               if (user_min_free_kbytes >= 0)
+                       pr_info("raising min_free_kbytes from %d to %lu "
+                               "to help transparent hugepage allocations\n",
+                               min_free_kbytes, recommended_min);
+
                min_free_kbytes = recommended_min;
+       }
        setup_per_zone_wmarks();
        return 0;
 }
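
The user_min_free_kbytes test above relies on a sentinel kept by the page allocator: it stays -1 until the admin writes the min_free_kbytes sysctl, so the message is only printed when THP is overriding an explicitly configured value. A rough, simplified sketch of that bookkeeping in mm/page_alloc.c (handler body condensed; exact signature may differ by kernel version):

	int user_min_free_kbytes = -1;	/* -1: never set via /proc/sys/vm/min_free_kbytes */

	int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length, loff_t *ppos)
	{
		proc_dointvec(table, write, buffer, length, ppos);
		if (write) {
			user_min_free_kbytes = min_free_kbytes;	/* remember the admin's choice */
			setup_per_zone_wmarks();
		}
		return 0;
	}
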
@@ -655,7 +661,7 @@ out:
        hugepage_exit_sysfs(hugepage_kobj);
        return err;
 }
-module_init(hugepage_init)
+subsys_initcall(hugepage_init);
 
 static int __init setup_transparent_hugepage(char *str)
 {
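
Since THP cannot be modular, module_init() here expands to a device-level initcall anyway; switching to subsys_initcall() simply moves hugepage_init() to an earlier initcall level so the THP sysfs interface and khugepaged are ready before later init code runs. Roughly, from include/linux/init.h:

	#define subsys_initcall(fn)	__define_initcall(fn, 4)
	#define device_initcall(fn)	__define_initcall(fn, 6)
	/* for built-in code, module_init(x) is just a device_initcall */
	#define module_init(x)		__initcall(x);
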
@@ -712,7 +718,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
        pgtable_t pgtable;
        spinlock_t *ptl;
 
-       VM_BUG_ON(!PageCompound(page));
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable))
                return VM_FAULT_OOM;
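
The VM_BUG_ON_PAGE() conversions throughout this patch keep the same assertions but dump the offending struct page before panicking, which makes the resulting oops far more useful. The macro, added to include/linux/mmdebug.h by the same series, is approximately:

	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON(cond)		BUG_ON(cond)
	#define VM_BUG_ON_PAGE(cond, page)					\
		do { if (unlikely(cond)) { dump_page(page); BUG(); } } while (0)
	#else
	#define VM_BUG_ON(cond)		BUILD_BUG_ON_INVALID(cond)
	#define VM_BUG_ON_PAGE(cond, page)	VM_BUG_ON(cond)
	#endif
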
@@ -883,9 +889,6 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                goto out_unlock;
        }
 
-       /* mmap_sem prevents this happening but warn if that changes */
-       WARN_ON(pmd_trans_migrating(pmd));
-
        if (unlikely(pmd_trans_splitting(pmd))) {
                /* split huge page running from under us */
                spin_unlock(src_ptl);
@@ -896,7 +899,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                goto out;
        }
        src_page = pmd_page(pmd);
-       VM_BUG_ON(!PageHead(src_page));
+       VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
        get_page(src_page);
        page_dup_rmap(src_page);
        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -1070,7 +1073,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_same(*pmd, orig_pmd)))
                goto out_free_pages;
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
 
        pmdp_clear_flush(vma, haddr, pmd);
        /* leave pmd empty until pte is filled */
@@ -1136,7 +1139,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_unlock;
 
        page = pmd_page(orig_pmd);
-       VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+       VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
        if (page_mapcount(page) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
@@ -1157,7 +1160,7 @@ alloc:
                new_page = NULL;
 
        if (unlikely(!new_page)) {
-               if (is_huge_zero_pmd(orig_pmd)) {
+               if (!page) {
                        ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
                                        address, pmd, orig_pmd, haddr);
                } else {
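
Earlier in do_huge_pmd_wp_page(), page is left NULL when orig_pmd maps the huge zero page (that path jumps straight to alloc) and is only assigned via pmd_page() otherwise, so testing !page here and below distinguishes the zero-page case from that single earlier check instead of re-evaluating is_huge_zero_pmd(orig_pmd) later. A condensed sketch of the control flow, assuming the structure visible in the surrounding context:

	struct page *page = NULL, *new_page;
	...
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;		/* page stays NULL for the huge zero page */
	spin_lock(ptl);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;
	page = pmd_page(orig_pmd);	/* non-NULL only for a real THP */
	...
alloc:
	if (unlikely(!new_page)) {
		if (!page)		/* was: is_huge_zero_pmd(orig_pmd) */
			ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
					address, pmd, orig_pmd, haddr);
		...
	}
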
@@ -1184,7 +1187,7 @@ alloc:
 
        count_vm_event(THP_FAULT_ALLOC);
 
-       if (is_huge_zero_pmd(orig_pmd))
+       if (!page)
                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
        else
                copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
@@ -1210,11 +1213,11 @@ alloc:
                page_add_new_anon_rmap(new_page, vma, haddr);
                set_pmd_at(mm, haddr, pmd, entry);
                update_mmu_cache_pmd(vma, address, pmd);
-               if (is_huge_zero_pmd(orig_pmd)) {
+               if (!page) {
                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
                        put_huge_zero_page();
                } else {
-                       VM_BUG_ON(!PageHead(page));
+                       VM_BUG_ON_PAGE(!PageHead(page), page);
                        page_remove_rmap(page);
                        put_page(page);
                }
@@ -1252,7 +1255,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                goto out;
 
        page = pmd_page(*pmd);
-       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON_PAGE(!PageHead(page), page);
        if (flags & FOLL_TOUCH) {
                pmd_t _pmd;
                /*
@@ -1277,7 +1280,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                }
        }
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-       VM_BUG_ON(!PageCompound(page));
+       VM_BUG_ON_PAGE(!PageCompound(page), page);
        if (flags & FOLL_GET)
                get_page_foll(page);
 
@@ -1435,9 +1438,9 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                } else {
                        page = pmd_page(orig_pmd);
                        page_remove_rmap(page);
-                       VM_BUG_ON(page_mapcount(page) < 0);
+                       VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-                       VM_BUG_ON(!PageHead(page));
+                       VM_BUG_ON_PAGE(!PageHead(page), page);
                        atomic_long_dec(&tlb->mm->nr_ptes);
                        spin_unlock(ptl);
                        tlb_remove_page(tlb, page);
@@ -1505,19 +1508,15 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                        spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
                pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
                VM_BUG_ON(!pmd_none(*new_pmd));
-               set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
-               if (new_ptl != old_ptl) {
-                       pgtable_t pgtable;
 
-                       /*
-                        * Move preallocated PTE page table if new_pmd is on
-                        * different PMD page table.
-                        */
+               if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
+                       pgtable_t pgtable;
                        pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
                        pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
-
-                       spin_unlock(new_ptl);
                }
+               set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+               if (new_ptl != old_ptl)
+                       spin_unlock(new_ptl);
                spin_unlock(old_ptl);
        }
 out:
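
pmd_move_must_withdraw() abstracts the old open-coded new_ptl != old_ptl test so that architectures which always keep a deposited PTE page table behind a huge pmd (notably powerpc) can force the withdraw/deposit on mremap even when both pmds share the same split ptlock. The generic fallback is, roughly:

	#ifndef pmd_move_must_withdraw
	static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
						 spinlock_t *old_pmd_ptl)
	{
		/*
		 * With split pmd ptlocks the deposited page table only needs
		 * to move when new_pmd lives under a different pmd page table.
		 */
		return new_pmd_ptl != old_pmd_ptl;
	}
	#endif
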
@@ -2179,9 +2178,9 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                if (unlikely(!page))
                        goto out;
 
-               VM_BUG_ON(PageCompound(page));
-               BUG_ON(!PageAnon(page));
-               VM_BUG_ON(!PageSwapBacked(page));
+               VM_BUG_ON_PAGE(PageCompound(page), page);
+               VM_BUG_ON_PAGE(!PageAnon(page), page);
+               VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
                /* cannot use mapcount: can't collapse if there's a gup pin */
                if (page_count(page) != 1)
@@ -2204,8 +2203,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                }
                /* 0 stands for page_is_file_cache(page) == false */
                inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
-               VM_BUG_ON(!PageLocked(page));
-               VM_BUG_ON(PageLRU(page));
+               VM_BUG_ON_PAGE(!PageLocked(page), page);
+               VM_BUG_ON_PAGE(PageLRU(page), page);
 
                /* If there is no mapped pte young don't collapse the page */
                if (pte_young(pteval) || PageReferenced(page) ||
@@ -2235,7 +2234,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                } else {
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
-                       VM_BUG_ON(page_mapcount(src_page) != 1);
+                       VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
                        release_pte_page(src_page);
                        /*
                         * ptl mostly unnecessary, but preempt has to
@@ -2314,7 +2313,7 @@ static struct page
                       struct vm_area_struct *vma, unsigned long address,
                       int node)
 {
-       VM_BUG_ON(*hpage);
+       VM_BUG_ON_PAGE(*hpage, *hpage);
        /*
         * Allocate the page while the vma is still valid and under
         * the mmap_sem read mode so there is no memory allocation
@@ -2583,7 +2582,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 */
                node = page_to_nid(page);
                khugepaged_node_load[node]++;
-               VM_BUG_ON(PageCompound(page));
+               VM_BUG_ON_PAGE(PageCompound(page), page);
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
                        goto out_unmap;
                /* cannot use mapcount: can't collapse if there's a gup pin */
@@ -2879,7 +2878,7 @@ again:
                return;
        }
        page = pmd_page(*pmd);
-       VM_BUG_ON(!page_count(page));
+       VM_BUG_ON_PAGE(!page_count(page), page);
        get_page(page);
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);