mm: compaction: do not mark unmovable pageblocks as skipped in async compaction
[firefly-linux-kernel-4.4.55.git] mm/hugetlb.c
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7d57af21f49e920776979dfe11023780ebff6afb..04306b9de90d49300a405499d01bd767c6679e49 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -476,40 +476,6 @@ static int vma_has_reserves(struct vm_area_struct *vma, long chg)
        return 0;
 }
 
-static void copy_gigantic_page(struct page *dst, struct page *src)
-{
-       int i;
-       struct hstate *h = page_hstate(src);
-       struct page *dst_base = dst;
-       struct page *src_base = src;
-
-       for (i = 0; i < pages_per_huge_page(h); ) {
-               cond_resched();
-               copy_highpage(dst, src);
-
-               i++;
-               dst = mem_map_next(dst, dst_base, i);
-               src = mem_map_next(src, src_base, i);
-       }
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
-       int i;
-       struct hstate *h = page_hstate(src);
-
-       if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-               copy_gigantic_page(dst, src);
-               return;
-       }
-
-       might_sleep();
-       for (i = 0; i < pages_per_huge_page(h); i++) {
-               cond_resched();
-               copy_highpage(dst + i, src + i);
-       }
-}
-
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
        int nid = page_to_nid(page);
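The two open-coded copy helpers above leave mm/hugetlb.c. Their per-page loop (copy_highpage() with a cond_resched() between pages so copying a large huge page cannot hog the CPU, plus mem_map_next() for gigantic pages whose struct pages may not be contiguous) is generic rather than hugetlb-specific; after this series the equivalent copy logic is expected to live with the page-migration code instead, but that companion change is outside this per-file diff.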
@@ -724,18 +690,26 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
  */
 int PageHuge(struct page *page)
 {
-       compound_page_dtor *dtor;
-
        if (!PageCompound(page))
                return 0;
 
        page = compound_head(page);
-       dtor = get_compound_page_dtor(page);
-
-       return dtor == free_huge_page;
+       return get_compound_page_dtor(page) == free_huge_page;
 }
 EXPORT_SYMBOL_GPL(PageHuge);
 
+/*
+ * PageHeadHuge() only returns true for hugetlbfs head pages, not for
+ * normal or transparent huge pages.
+ */
+int PageHeadHuge(struct page *page_head)
+{
+       if (!PageHead(page_head))
+               return 0;
+
+       return get_compound_page_dtor(page_head) == free_huge_page;
+}
+
 pgoff_t __basepage_index(struct page *page)
 {
        struct page *page_head = compound_head(page);
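PageHuge() above is simplified to compare the compound destructor inline, and the new PageHeadHuge() gives callers that already hold a head page a cheaper test that skips the compound_head() lookup. Only hugetlbfs installs free_huge_page as the compound destructor, so both helpers stay false for transparent huge pages. A minimal illustration (not from this patch) of how the two differ, assuming head points at a mapped, non-gigantic hugetlbfs page so its struct pages are contiguous:

	struct page *tail = head + 1;	/* first tail page */

	PageHuge(tail);		/* true: resolves compound_head() first */
	PageHeadHuge(tail);	/* false: a tail page is not a head page */
	PageHeadHuge(head);	/* true: head page with dtor == free_huge_page */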
@@ -1297,9 +1271,9 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
        for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
                void *addr;
 
-               addr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
-                               huge_page_size(h), huge_page_size(h), 0);
-
+               addr = memblock_virt_alloc_try_nid_nopanic(
+                               huge_page_size(h), huge_page_size(h),
+                               0, BOOTMEM_ALLOC_ACCESSIBLE, node);
                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the
@@ -1339,8 +1313,8 @@ static void __init gather_bootmem_prealloc(void)
 
 #ifdef CONFIG_HIGHMEM
                page = pfn_to_page(m->phys >> PAGE_SHIFT);
-               free_bootmem_late((unsigned long)m,
-                                 sizeof(struct huge_bootmem_page));
+               memblock_free_late(__pa(m),
+                                  sizeof(struct huge_bootmem_page));
 #else
                page = virt_to_page(m);
 #endif
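Both bootmem call sites above move to the memblock API. On the allocation side, __alloc_bootmem_node_nopanic() becomes memblock_virt_alloc_try_nid_nopanic(), which, like its predecessor, returns NULL instead of panicking on failure, so the existing if (addr) fallback still works; BOOTMEM_ALLOC_ACCESSIBLE (0) as max_addr means any directly accessible memory. The free side is analogous, with one subtlety: free_bootmem_late() took a virtual address, while memblock_free_late() takes a physical one, hence the new __pa(m). For reference, the era's signatures (from include/linux/bootmem.h; worth verifying against your tree):

	void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
			phys_addr_t align, phys_addr_t min_addr,
			phys_addr_t max_addr, int nid);
	void memblock_free_late(phys_addr_t base, phys_addr_t size);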
@@ -2372,17 +2346,27 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        int cow;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
+       int ret = 0;
 
        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
+       mmun_start = vma->vm_start;
+       mmun_end = vma->vm_end;
+       if (cow)
+               mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
+
        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
                spinlock_t *src_ptl, *dst_ptl;
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
                dst_pte = huge_pte_alloc(dst, addr, sz);
-               if (!dst_pte)
-                       goto nomem;
+               if (!dst_pte) {
+                       ret = -ENOMEM;
+                       break;
+               }
 
                /* If the pagetables are shared don't copy or take references */
                if (dst_pte == src_pte)
@@ -2403,10 +2387,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
        }
-       return 0;
 
-nomem:
-       return -ENOMEM;
+       if (cow)
+               mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
+
+       return ret;
 }
 
 static int is_hugetlb_entry_migration(pte_t pte)
@@ -3096,7 +3081,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 same_page:
                if (pages) {
                        pages[i] = mem_map_offset(page, pfn_offset);
-                       get_page(pages[i]);
+                       get_page_foll(pages[i]);
                }
 
                if (vmas)
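Finally, GUP on hugetlb pages switches from get_page() to get_page_foll(), the follow_page()/get_user_pages() flavor of the reference grab. The difference matters for compound tail pages, whose extra references taken under the page table lock are accounted specially; roughly, condensed from mm/internal.h of this era (illustrative, check your tree):

	static inline void get_page_foll(struct page *page)
	{
		if (unlikely(PageTail(page)))
			/* tail ref is accounted via the head page */
			__get_page_tail_foll(page, true);
		else {
			/* head/normal page: must already be referenced */
			VM_BUG_ON(atomic_read(&page->_count) <= 0);
			atomic_inc(&page->_count);
		}
	}

Once hugetlbfs tail pages follow the same refcounting rules as THP tails, plain get_page() is no longer the right primitive on this path.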