diff --git a/mm/memory.c b/mm/memory.c
index bbe6a73a899d2bbfe173b3ad4821075390859b68..8068893697bbdbb5d64f9d43508658d601a6932e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1965,6 +1965,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
        vmf.pgoff = page->index;
        vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
        vmf.page = page;
+       vmf.cow_page = NULL;
 
        ret = vma->vm_ops->page_mkwrite(vma, &vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
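
The cow_page field set here is added to struct vm_fault elsewhere in this series. do_page_mkwrite() clears it because ->page_mkwrite() is only ever called on an existing page that is about to become writable, never asked to COW. An abridged sketch of the structure after this change (field comments approximate; include/linux/mm.h is the authoritative definition):

        struct vm_fault {
                unsigned int flags;             /* FAULT_FLAG_xxx flags */
                pgoff_t pgoff;                  /* logical page offset based on vma */
                void __user *virtual_address;   /* faulting virtual address */

                struct page *cow_page;          /* handler may choose to COW into this */
                struct page *page;              /* ->fault handlers return a page here,
                                                 * unless VM_FAULT_NOPAGE is set */
        };
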
@@ -2329,6 +2330,7 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
 
 
+       /* DAX uses i_mmap_lock to serialise file truncate vs page fault */
        i_mmap_lock_write(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
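
unmap_mapping_range() is the truncate/hole-punch side of the race the new comment describes: it holds i_mmap_lock for write while it tears mappings down. A DAX fault handler has no page cache page to lock, so it takes the same lock for read instead; the pairing looks roughly like this (a sketch; the exact unlock placement is spelled out in the do_cow_fault() hunks below):

        /* truncate side (this function)       fault side (DAX, no page lock)
         *   i_mmap_lock_write(mapping);         i_mmap_lock_read(mapping);
         *   unmap_mapping_range_tree(...);      ... set up the mapping ...
         *   i_mmap_unlock_write(mapping);       i_mmap_unlock_read(mapping);
         */
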
@@ -2638,7 +2640,8 @@ oom:
  * See filemap_fault() and __lock_page_or_retry().
  */
 static int __do_fault(struct vm_area_struct *vma, unsigned long address,
-               pgoff_t pgoff, unsigned int flags, struct page **page)
+                       pgoff_t pgoff, unsigned int flags,
+                       struct page *cow_page, struct page **page)
 {
        struct vm_fault vmf;
        int ret;
@@ -2647,10 +2650,13 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
        vmf.pgoff = pgoff;
        vmf.flags = flags;
        vmf.page = NULL;
+       vmf.cow_page = cow_page;
 
        ret = vma->vm_ops->fault(vma, &vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                return ret;
+       if (!vmf.page)
+               goto out;
 
        if (unlikely(PageHWPoison(vmf.page))) {
                if (ret & VM_FAULT_LOCKED)
@@ -2664,6 +2670,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
        else
                VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
+ out:
        *page = vmf.page;
        return ret;
 }
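
Taken together, the __do_fault() changes define a new contract: a handler that maps storage directly (DAX) may fill vmf->cow_page itself and return 0 with vmf->page left NULL, in which case the HWPoison and page-lock checks are skipped via the new out label. The read and shared fault paths below simply pass NULL for cow_page. A hypothetical handler showing the shape of that contract (dax_fault_sketch and copy_block_to_page are illustrative names, not symbols from this patch):

        static int dax_fault_sketch(struct vm_area_struct *vma, struct vm_fault *vmf)
        {
                struct address_space *mapping = vma->vm_file->f_mapping;

                i_mmap_lock_read(mapping);      /* excludes unmap_mapping_range() */

                if (vmf->cow_page) {
                        /*
                         * Private COW fault: copy file data straight into the
                         * pre-allocated destination page, return no page, and
                         * keep i_mmap_lock held for read; do_cow_fault()
                         * drops it once the pte is installed.
                         */
                        copy_block_to_page(mapping->host, vmf->pgoff, vmf->cow_page);
                        return 0;
                }

                /* Shared fault: insert the pfn mapping here, then unlock. */
                i_mmap_unlock_read(mapping);
                return VM_FAULT_NOPAGE;
        }
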
@@ -2834,7 +2841,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                pte_unmap_unlock(pte, ptl);
        }
 
-       ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+       ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                return ret;
 
@@ -2874,26 +2881,43 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                return VM_FAULT_OOM;
        }
 
-       ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+       ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                goto uncharge_out;
 
-       copy_user_highpage(new_page, fault_page, address, vma);
+       if (fault_page)
+               copy_user_highpage(new_page, fault_page, address, vma);
        __SetPageUptodate(new_page);
 
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (unlikely(!pte_same(*pte, orig_pte))) {
                pte_unmap_unlock(pte, ptl);
-               unlock_page(fault_page);
-               page_cache_release(fault_page);
+               if (fault_page) {
+                       unlock_page(fault_page);
+                       page_cache_release(fault_page);
+               } else {
+                       /*
+                        * The fault handler has no page to lock, so it holds
+                        * i_mmap_lock for read to protect against truncate.
+                        */
+                       i_mmap_unlock_read(vma->vm_file->f_mapping);
+               }
                goto uncharge_out;
        }
        do_set_pte(vma, address, new_page, pte, true, true);
        mem_cgroup_commit_charge(new_page, memcg, false);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pte_unmap_unlock(pte, ptl);
-       unlock_page(fault_page);
-       page_cache_release(fault_page);
+       if (fault_page) {
+               unlock_page(fault_page);
+               page_cache_release(fault_page);
+       } else {
+               /*
+                * The fault handler has no page to lock, so it holds
+                * i_mmap_lock for read to protect against truncate.
+                */
+               i_mmap_unlock_read(vma->vm_file->f_mapping);
+       }
        return ret;
 uncharge_out:
        mem_cgroup_cancel_charge(new_page, memcg);
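
Both exit paths in do_cow_fault() now distinguish the two handler styles: a conventional handler returned a locked page cache page that must be unlocked and released, while a pageless DAX handler instead left i_mmap_lock held for read, and do_cow_fault() drops it on the handler's behalf.
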
@@ -2912,7 +2936,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        int dirtied = 0;
        int ret, tmp;
 
-       ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+       ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                return ret;
 
@@ -3013,14 +3037,17 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        bool migrated = false;
        int flags = 0;
 
+       /* A PROT_NONE fault should not end up here */
+       BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
+
        /*
        * The "pte" at this point cannot be used safely without
        * validation through pte_unmap_same(). It's of NUMA type but
        * the pfn may be screwed if the read is non atomic.
        *
-       * ptep_modify_prot_start is not called as this is clearing
-       * the _PAGE_NUMA bit and it is not really expected that there
-       * would be concurrent hardware modifications to the PTE.
+       * We can safely just do a "set_pte_at()", because the old
+       * page table entry is not accessible, so there would be no
+       * concurrent hardware modifications to the PTE.
        */
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
@@ -3029,7 +3056,9 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out;
        }
 
-       pte = pte_mknonnuma(pte);
+       /* Make it present again */
+       pte = pte_modify(pte, vma->vm_page_prot);
+       pte = pte_mkyoung(pte);
        set_pte_at(mm, addr, ptep, pte);
        update_mmu_cache(vma, addr, ptep);
 
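
With _PAGE_NUMA gone, a NUMA hinting fault is just a PROT_NONE fault, and do_numa_page() rebuilds a normal pte from the vma's protections instead of calling pte_mknonnuma(). The life cycle, using names from this patch plus the existing arming path (a sketch of the flow, not new code):

        /*
         * task_numa_work() -> change_prot_numa():  pte made PROT_NONE
         * next touch:     handle_pte_fault() sees pte_protnone()
         *                 -> do_numa_page()
         * do_numa_page(): pte_modify(pte, vma->vm_page_prot) makes the pte
         *                 present again, pte_mkyoung() marks it referenced,
         *                 then the usual migrate-on-fault logic runs.
         */
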
@@ -3038,7 +3067,6 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                pte_unmap_unlock(ptep, ptl);
                return 0;
        }
-       BUG_ON(is_zero_pfn(page_to_pfn(page)));
 
        /*
         * Avoid grouping on DSO/COW pages in specific and RO pages
@@ -3124,7 +3152,7 @@ static int handle_pte_fault(struct mm_struct *mm,
                                        pte, pmd, flags, entry);
        }
 
-       if (pte_numa(entry))
+       if (pte_protnone(entry))
                return do_numa_page(mm, vma, address, entry, pte, pmd);
 
        ptl = pte_lockptr(mm, pmd);
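
pte_numa() tested a dedicated software bit (_PAGE_NUMA); pte_protnone() instead asks whether the pte is present as far as the kernel is concerned but grants no access to hardware, which is exactly the state change_prot_numa() produces. The huge-page path below gets the matching pmd_numa() -> pmd_protnone() substitution.
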
@@ -3202,7 +3230,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        if (pmd_trans_splitting(orig_pmd))
                                return 0;
 
-                       if (pmd_numa(orig_pmd))
+                       if (pmd_protnone(orig_pmd))
                                return do_huge_pmd_numa_page(mm, vma, address,
                                                             orig_pmd, pmd);
 
@@ -3458,7 +3486,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
        if (follow_phys(vma, addr, write, &prot, &phys_addr))
                return -EINVAL;
 
-       maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+       maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
        if (write)
                memcpy_toio(maddr + offset, buf, len);
        else
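
The last hunk fixes an overrun in generic_access_phys(): the copy starts at offset = addr & (PAGE_SIZE - 1) and runs for len bytes, so a fixed PAGE_SIZE mapping is too short whenever offset + len crosses a page boundary. PAGE_ALIGN(len + offset) sizes the ioremap to cover the whole access. A worked example, assuming 4 KiB pages:

        /*
         * offset = 0xff0, len = 0x20:
         *   before: ioremap_prot(phys_addr, 0x1000, prot)
         *           -> memcpy_toio() runs 0x10 bytes past the mapping
         *   after:  ioremap_prot(phys_addr, PAGE_ALIGN(0x20 + 0xff0), prot)
         *           -> 0x2000-byte mapping covers the whole copy
         */
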