mm: revert "thp: make MADV_HUGEPAGE check for mm->def_flags"
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c02b9dadfb05b28e2aef363523f021f933bdb6d..ed5072c64daac1a40ceaec919e73bb7dfb93f0ad 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2690,7 +2690,8 @@ retry_avoidcopy:
                                BUG_ON(huge_pte_none(pte));
                                spin_lock(ptl);
                                ptep = huge_pte_offset(mm, address & huge_page_mask(h));
-                               if (likely(pte_same(huge_ptep_get(ptep), pte)))
+                               if (likely(ptep &&
+                                          pte_same(huge_ptep_get(ptep), pte)))
                                        goto retry_avoidcopy;
                                /*
                                 * race occurs while re-acquiring page table
@@ -2734,7 +2735,7 @@ retry_avoidcopy:
         */
        spin_lock(ptl);
        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
-       if (likely(pte_same(huge_ptep_get(ptep), pte))) {
+       if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
                ClearPagePrivate(new_page);
 
                /* Break COW */
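
The two hunks above harden the copy-on-write retry paths in hugetlb_cow() (the
retry_avoidcopy label in the hunk headers): huge_pte_offset() can return NULL
when the page-table level backing the huge page is no longer populated, e.g.
after the mapping was zapped while the lock was dropped, so the pointer must be
checked before the cached pte is compared. As a readability aid, a sketch of
how the re-check reads once the second hunk is applied; it is reconstructed
from the hunk context only, and the remainder of the path is elided:

	spin_lock(ptl);
	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
		ClearPagePrivate(new_page);

		/* Break COW */
		/* ... remainder of the replacement path elided ... */
	}
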
@@ -3185,6 +3186,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
+       mmu_notifier_invalidate_range_start(mm, start, end);
        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        for (; address < end; address += huge_page_size(h)) {
                spinlock_t *ptl;
@@ -3214,6 +3216,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         */
        flush_tlb_range(vma, start, end);
        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+       mmu_notifier_invalidate_range_end(mm, start, end);
 
        return pages << h->order;
 }
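
With the last two hunks applied, hugetlb_change_protection() brackets the whole
protection-change walk with MMU notifier calls: invalidate_range_start is
issued before the i_mmap_mutex is taken and the huge PTEs are rewritten, and
invalidate_range_end only after the TLB flush and the mutex_unlock, so
mmu-notifier users are informed of the affected range around the entire update.
A sketch of the resulting function tail, reconstructed from the hunk context
only, with the per-page loop body and surrounding comments elided:

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	mmu_notifier_invalidate_range_start(mm, start, end);
	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
	for (; address < end; address += huge_page_size(h)) {
		spinlock_t *ptl;
		/* ... per-page protection update elided ... */
	}
	/* ... */
	flush_tlb_range(vma, start, end);
	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages << h->order;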