[firefly-linux-kernel-4.4.55.git] / mm / mmap.c
index 7dbe39745be9e86b05d51b6372a609ec268c6330..9aa554b7e620e1c0596a45035fbf0ebc3372dd0e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -893,7 +893,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
  * per-vma resources, so we don't attempt to merge those.
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
-                       struct file *file, unsigned long vm_flags)
+                       struct file *file, unsigned long vm_flags,
+                       const char __user *anon_name)
 {
        if (vma->vm_flags ^ vm_flags)
                return 0;
@@ -901,6 +902,8 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
                return 0;
        if (vma->vm_ops && vma->vm_ops->close)
                return 0;
+       if (vma_get_anon_name(vma) != anon_name)
+               return 0;
        return 1;
 }
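
Note: the new anon_name argument threads the "named private anonymous memory" attribute (from the Android anon-name patches carried in this tree) into the merge decision. is_mergeable_vma() compares the stored userspace name pointers, so two mappings only count as mergeable when they were named through the same pointer (or are both unnamed). A minimal userspace sketch of the visible effect, assuming a kernel with those patches and their PR_SET_VMA prctl; the constants and the "[anon:...]" /proc/self/maps format come from that patchset, not from this diff:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41	/* Android anon-name prctl */
#define PR_SET_VMA_ANON_NAME	0
#endif

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	static const char name[] = "demo";
	char *p = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	FILE *maps;
	char line[256];

	if (p == MAP_FAILED)
		return 1;

	/* Name each page separately, reusing the same user pointer; the
	 * pointer comparison in is_mergeable_vma() then sees the two
	 * halves as equal, so they can merge back into one region. */
	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)p, pg,
	      (unsigned long)name);
	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)(p + pg), pg,
	      (unsigned long)name);

	/* On a patched kernel this prints a single "[anon:demo]" line. */
	maps = fopen("/proc/self/maps", "r");
	while (maps && fgets(line, sizeof(line), maps))
		if (strstr(line, "[anon:demo]"))
			fputs(line, stdout);
	if (maps)
		fclose(maps);
	munmap(p, 2 * pg);
	return 0;
}

On a kernel without the patches the prctl calls simply fail and the two pages remain one unnamed mapping.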
 
@@ -931,9 +934,10 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
  */
 static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
-       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+       const char __user *anon_name)
 {
-       if (is_mergeable_vma(vma, file, vm_flags) &&
+       if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
@@ -950,9 +954,10 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
  */
 static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
-       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+       struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
+       const char __user *anon_name)
 {
-       if (is_mergeable_vma(vma, file, vm_flags) &&
+       if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -963,9 +968,9 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor.  Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -995,7 +1000,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
-                       pgoff_t pgoff, struct mempolicy *policy)
+                       pgoff_t pgoff, struct mempolicy *policy,
+                       const char __user *anon_name)
 {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
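
For reference, the full vma_merge() signature after this change. Callers that have no name to match (mmap_region() and do_brk() below) pass NULL for anon_name, and unnamed mappings store a NULL name, so those paths merge exactly as before:

struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy,
			const char __user *anon_name);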
@@ -1021,15 +1027,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
         */
        if (prev && prev->vm_end == addr &&
                        mpol_equal(vma_policy(prev), policy) &&
-                       can_vma_merge_after(prev, vm_flags,
-                                               anon_vma, file, pgoff)) {
+                       can_vma_merge_after(prev, vm_flags, anon_vma,
+                                               file, pgoff, anon_name)) {
                /*
                 * OK, it can.  Can we now merge in the successor as well?
                 */
                if (next && end == next->vm_start &&
                                mpol_equal(policy, vma_policy(next)) &&
-                               can_vma_merge_before(next, vm_flags,
-                                       anon_vma, file, pgoff+pglen) &&
+                               can_vma_merge_before(next, vm_flags, anon_vma,
+                                               file, pgoff+pglen, anon_name) &&
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
@@ -1049,8 +1055,8 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
         */
        if (next && end == next->vm_start &&
                        mpol_equal(policy, vma_policy(next)) &&
-                       can_vma_merge_before(next, vm_flags,
-                                       anon_vma, file, pgoff+pglen)) {
+                       can_vma_merge_before(next, vm_flags, anon_vma,
+                                       file, pgoff+pglen, anon_name)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
                        err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
@@ -1519,7 +1525,8 @@ munmap_back:
        /*
         * Can we just expand an old mapping?
         */
-       vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+       vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
+                       NULL, NULL);
        if (vma)
                goto out;
 
@@ -1853,7 +1860,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
 
-       if (len > TASK_SIZE)
+       if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;
 
        if (flags & MAP_FIXED)
@@ -1862,7 +1869,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
+               if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
@@ -1901,7 +1908,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct vm_unmapped_area_info info;
 
        /* requested length too big for entire address space */
-       if (len > TASK_SIZE)
+       if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;
 
        if (flags & MAP_FIXED)
@@ -1911,14 +1918,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
+               if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                                (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
-       info.low_limit = PAGE_SIZE;
+       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
        info.high_limit = mm->mmap_base;
        info.align_mask = 0;
        addr = vm_unmapped_area(&info);
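
The mmap_min_addr changes in these two hunks work together: the up-front length check fails requests that could only fit by starting below mmap_min_addr, the hint check stops a too-low addr hint from being returned directly, and the top-down search floor is raised to max(PAGE_SIZE, mmap_min_addr) so vm_unmapped_area() never hands back a slot in the protected low range. A worked example with illustrative values (actual defaults depend on the architecture and the vm.mmap_min_addr sysctl):

/*
 *   TASK_SIZE     = 0xC0000000  (3 GiB of user address space, 32-bit)
 *   mmap_min_addr = 0x00010000  (64 KiB, a common distro setting)
 *
 * A request of len = 0xBFFFF000 passes the old "len > TASK_SIZE" test,
 * yet any placement with addr + len <= TASK_SIZE needs addr <= 0x1000,
 * i.e. below mmap_min_addr, so the search could never succeed.  The new
 * "len > TASK_SIZE - mmap_min_addr" test rejects it with -ENOMEM
 * immediately.
 */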
@@ -2356,7 +2363,7 @@ static void unmap_region(struct mm_struct *mm,
        struct mmu_gather tlb;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end);
        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
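
tlb_gather_mmu() now takes the virtual address range being torn down rather than a single full-mm flag (the removed calls passed 0 or 1 as their third argument); this appears to track the upstream 3.11 TLB-gather range fix, and exit_mmap() below passes (0, -1) to mean "the entire address space". The range-based prototype, for reference:

/* Callers describe the span being unmapped so the flush can be
 * bounded, or force a full-mm teardown by passing 0, -1. */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end);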
@@ -2663,7 +2670,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 
        /* Can we just expand an old private anonymous mapping? */
        vma = vma_merge(mm, prev, addr, addr + len, flags,
-                                       NULL, NULL, pgoff, NULL);
+                                       NULL, NULL, pgoff, NULL, NULL);
        if (vma)
                goto out;
 
@@ -2735,7 +2742,7 @@ void exit_mmap(struct mm_struct *mm)
 
        lru_add_drain();
        flush_cache_mm(mm);
-       tlb_gather_mmu(&tlb, mm, 1);
+       tlb_gather_mmu(&tlb, mm, 0, -1);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, vma, 0, -1);
@@ -2821,7 +2828,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
                return NULL;    /* should never get here */
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-                       vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+                       vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+                       vma_get_anon_name(vma));
        if (new_vma) {
                /*
                 * Source vma may have been merged into new_vma