Merge branch 'config' into late-for-linus
diff --git a/mm/rmap.c b/mm/rmap.c
index 9c61bf387fd1bc369638a3c33b5457f4bedb5ec0..7df7984d476c8661b900725c0e2c3f33c883a32d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-       unsigned long address;
 
        if (unlikely(is_vm_hugetlb_page(vma)))
                pgoff = page->index << huge_page_order(page_hstate(page));
-       address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-       if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-               /* page should be within @vma mapping range */
-               return -EFAULT;
-       }
+
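+       /* linear mapping: vma start plus the page's byte offset into the vma */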
+       return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+       unsigned long address = __vma_address(page, vma);
+
+       /* page should be within @vma mapping range */
+       VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
        return address;
 }
 
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
+       unsigned long address;
        if (PageAnon(page)) {
                struct anon_vma *page__anon_vma = page_anon_vma(page);
                /*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
                        return -EFAULT;
        } else
                return -EFAULT;
-       return vma_address(page, vma);
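+       /* open-coded instead of vma_address(): an out-of-range page must return -EFAULT here, not trip the VM_BUG_ON */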
+       address = __vma_address(page, vma);
+       if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+               return -EFAULT;
+       return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
        pte_t *pte;
        spinlock_t *ptl;
 
-       address = vma_address(page, vma);
-       if (address == -EFAULT)         /* out of vma range */
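+       /* callers may pass a vma that does not map the page, so check the range by hand */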
+       address = __vma_address(page, vma);
+       if (unlikely(address < vma->vm_start || address >= vma->vm_end))
                return 0;
        pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
        if (!pte)                       /* the page is not in this mm */
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
-               if (address == -EFAULT)
-                       continue;
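+               /* the interval tree only returns vmas that map this page's index, so vma_address() is always in range */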
                /*
                 * If we are reclaiming on behalf of a cgroup, skip
                 * counting on behalf of references from different
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
-               if (address == -EFAULT)
-                       continue;
                /*
                 * If we are reclaiming on behalf of a cgroup, skip
                 * counting on behalf of references from different
@@ -880,7 +884,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                pte_t entry;
 
                flush_cache_page(vma, address, pte_pfn(*pte));
-               entry = ptep_clear_flush_notify(vma, address, pte);
+               entry = ptep_clear_flush(vma, address, pte);
                entry = pte_wrprotect(entry);
                entry = pte_mkclean(entry);
                set_pte_at(mm, address, pte, entry);
@@ -888,6 +892,9 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
        }
 
        pte_unmap_unlock(pte, ptl);
+
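+       /* notify mmu listeners only if a pte was actually cleaned, and only after the pte lock is dropped */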
+       if (ret)
+               mmu_notifier_invalidate_page(mm, address);
 out:
        return ret;
 }
@@ -904,8 +911,6 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                if (vma->vm_flags & VM_SHARED) {
                        unsigned long address = vma_address(page, vma);
-                       if (address == -EFAULT)
-                               continue;
                        ret += page_mkclean_one(page, vma, address);
                }
        }
@@ -1078,7 +1083,7 @@ void page_add_new_anon_rmap(struct page *page,
        else
                __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __page_set_anon_rmap(page, vma, address, 1);
-       if (page_evictable(page, vma))
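+       /* a freshly faulted page in a VM_LOCKED vma is born unevictable */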
+       if (!mlocked_vma_newpage(vma, page))
                lru_cache_add_lru(page, LRU_ACTIVE_ANON);
        else
                add_page_to_unevictable_list(page);
@@ -1153,7 +1158,10 @@ void page_remove_rmap(struct page *page)
        } else {
                __dec_zone_page_state(page, NR_FILE_MAPPED);
                mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
+               mem_cgroup_end_update_page_stat(page, &locked, &flags);
        }
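+       /* this was the page's last mapping: clear any leftover PG_mlocked so it can leave the unevictable LRU */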
+       if (unlikely(PageMlocked(page)))
+               clear_page_mlock(page);
        /*
         * It would be tidy to reset the PageAnon mapping here,
         * but that might overwrite a racing page_add_anon_rmap
@@ -1163,6 +1171,7 @@ void page_remove_rmap(struct page *page)
         * Leaving it set also helps swapoff to reinstate ptes
         * faster for those pages still in swapcache.
         */
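+       /* file pages ended their page-stat update above; anon pages never began one, so skip the out: fixup */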
+       return;
 out:
        if (!anon)
                mem_cgroup_end_update_page_stat(page, &locked, &flags);
@@ -1206,7 +1215,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
-       pteval = ptep_clear_flush_notify(vma, address, pte);
+       pteval = ptep_clear_flush(vma, address, pte);
 
        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
@@ -1268,6 +1277,8 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
 out_unmap:
        pte_unmap_unlock(pte, ptl);
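+       /* SWAP_FAIL means the pte was left untouched, so there is nothing to invalidate */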
+       if (ret != SWAP_FAIL)
+               mmu_notifier_invalidate_page(mm, address);
 out:
        return ret;
 
@@ -1332,6 +1343,8 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
        spinlock_t *ptl;
        struct page *page;
        unsigned long address;
+       unsigned long mmun_start;       /* For mmu_notifiers */
+       unsigned long mmun_end;         /* For mmu_notifiers */
        unsigned long end;
        int ret = SWAP_AGAIN;
        int locked_vma = 0;
@@ -1355,6 +1368,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
        if (!pmd_present(*pmd))
                return ret;
 
+       mmun_start = address;
+       mmun_end   = end;
+       mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
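+       /* every pte cleared in the scan below falls inside this range */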
+
        /*
         * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
         * keep the sem while scanning the cluster for mlocking pages.
@@ -1388,7 +1405,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 
                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
-               pteval = ptep_clear_flush_notify(vma, address, pte);
+               pteval = ptep_clear_flush(vma, address, pte);
 
                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
@@ -1404,6 +1421,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                (*mapcount)--;
        }
        pte_unmap_unlock(pte - 1, ptl);
+       mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        if (locked_vma)
                up_read(&vma->vm_mm->mmap_sem);
        return ret;
@@ -1468,8 +1486,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
                        continue;
 
                address = vma_address(page, vma);
-               if (address == -EFAULT)
-                       continue;
                ret = try_to_unmap_one(page, vma, address, flags);
                if (ret != SWAP_AGAIN || !page_mapped(page))
                        break;
@@ -1508,8 +1524,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
        mutex_lock(&mapping->i_mmap_mutex);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
-               if (address == -EFAULT)
-                       continue;
                ret = try_to_unmap_one(page, vma, address, flags);
                if (ret != SWAP_AGAIN || !page_mapped(page))
                        goto out;
@@ -1684,8 +1698,6 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
-               if (address == -EFAULT)
-                       continue;
                ret = rmap_one(page, vma, address, arg);
                if (ret != SWAP_AGAIN)
                        break;
@@ -1707,8 +1719,6 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
        mutex_lock(&mapping->i_mmap_mutex);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
-               if (address == -EFAULT)
-                       continue;
                ret = rmap_one(page, vma, address, arg);
                if (ret != SWAP_AGAIN)
                        break;