Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b06b8db9df23c8f33406586507bbaecf7f5444c..00cfd1ae2271cf8113475c3c7bc9a07eb687bce7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -151,7 +151,7 @@ static int start_stop_khugepaged(void)
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
-               if (unlikely(IS_ERR(khugepaged_thread))) {
+               if (IS_ERR(khugepaged_thread)) {
                        pr_err("khugepaged: kthread_run(khugepaged) failed\n");
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
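
The unlikely() wrapper dropped in this hunk is redundant: IS_ERR() in
<linux/err.h> already routes its comparison through IS_ERR_VALUE(), which
contains the unlikely() hint, so repeating it at the call site adds nothing.
A minimal sketch of the resulting idiom, with a hypothetical thread function
my_thread_fn():

	struct task_struct *t;
	int err = 0;

	t = kthread_run(my_thread_fn, NULL, "my-thread");
	if (IS_ERR(t)) {		/* branch hint already lives inside IS_ERR() */
		err = PTR_ERR(t);	/* recover the errno encoded in the pointer */
		t = NULL;		/* don't keep an ERR_PTR around */
	}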
@@ -1307,7 +1307,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          pmd, _pmd,  1))
                        update_mmu_cache_pmd(vma, addr, pmd);
        }
-       if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
+       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();
                        if (page->mapping)
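
This block in follow_trans_huge_pmd() mlocks a followed page only when the
get_user_pages() caller requested it via the gup flag and the VMA is itself
VM_LOCKED. A hedged sketch of the full pattern as it appears in kernels of
this vintage (mlock_vma_page() is the mm-internal helper; the trylock and
the mapping recheck guard against racing truncation):

	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to the LRU */
			if (page->mapping)	/* recheck under the page lock:
						 * truncation may have raced */
				mlock_vma_page(page);
			unlock_page(page);
		}
	}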
@@ -1880,7 +1880,7 @@ static int __split_huge_page_map(struct page *page,
                 * here). But it is generally safer to never allow
                 * small and huge TLB entries for the same virtual
                 * address to be loaded simultaneously. So instead of
-                * doing "pmd_populate(); flush_tlb_range();" we first
+                * doing "pmd_populate(); flush_pmd_tlb_range();" we first
                 * mark the current pmd notpresent (atomically because
                 * here the pmd_trans_huge and pmd_trans_splitting
                 * must remain set at all times on the pmd until the
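
Expressed with the generic helpers, the ordering this comment argues for
looks roughly like the following sketch (not the verbatim function body):

	pmdp_invalidate(vma, address, pmd);	/* mark the huge pmd notpresent and
						 * flush the stale huge TLB entry */
	pmd_populate(mm, pmd, pgtable);		/* only then publish the page table
						 * of small ptes for this range */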
@@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
             _pte++, address += PAGE_SIZE) {
                pte_t pteval = *_pte;
-               if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+               if (pte_none(pteval) || (pte_present(pteval) &&
+                               is_zero_pfn(pte_pfn(pteval)))) {
                        if (!userfaultfd_armed(vma) &&
                            ++none_or_zero <= khugepaged_max_ptes_none)
                                continue;
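
The pte_present() test added in this hunk matters because pte_pfn() is only
meaningful for present ptes; on a swap or migration entry the extracted bits
are not a pfn, so is_zero_pfn() could match garbage. Restated as a
hypothetical helper:

	/* Sketch: a pte counts as "none or zero" only if it is empty, or
	 * present *and* mapping the shared zero pfn. */
	static inline bool pte_none_or_zero(pte_t pteval)
	{
		return pte_none(pteval) ||
		       (pte_present(pteval) && is_zero_pfn(pte_pfn(pteval)));
	}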