Merge branch 'perf/core' into perf/uprobes
index da15a79b1441b665b0e4b962eea38a92407018fd..b17a39f31a5efd66fecda38d1f3804a4eb41c234 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -30,6 +30,7 @@
 #include <linux/perf_event.h>
 #include <linux/audit.h>
 #include <linux/khugepaged.h>
+#include <linux/uprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -217,6 +218,7 @@ void unlink_file_vma(struct vm_area_struct *vma)
                mutex_lock(&mapping->i_mmap_mutex);
                __remove_shared_vm_struct(vma, file, mapping);
                mutex_unlock(&mapping->i_mmap_mutex);
+               uprobe_munmap(vma);
        }
 }
 
@@ -451,9 +453,8 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Helper for vma_adjust in the split_vma insert case:
- * insert vm structure into list and rbtree and anon_vma,
- * but it has already been inserted into prio_tree earlier.
+ * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
+ * mm's list and rbtree.  It has already been inserted into the prio_tree.
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
@@ -545,8 +546,14 @@ again:                     remove_next = 1 + (end > next->vm_end);
 
        if (file) {
                mapping = file->f_mapping;
-               if (!(vma->vm_flags & VM_NONLINEAR))
+               if (!(vma->vm_flags & VM_NONLINEAR)) {
                        root = &mapping->i_mmap;
+                       uprobe_munmap(vma);
+
+                       if (adjust_next)
+                               uprobe_munmap(next);
+               }
+
                mutex_lock(&mapping->i_mmap_mutex);
                if (insert) {
                        /*
@@ -616,8 +623,16 @@ again:                     remove_next = 1 + (end > next->vm_end);
        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);
 
+       if (root) {
+               uprobe_mmap(vma);
+
+               if (adjust_next)
+                       uprobe_mmap(next);
+       }
+
        if (remove_next) {
                if (file) {
+                       uprobe_munmap(next);
                        fput(file);
                        if (next->vm_flags & VM_EXECUTABLE)
                                removed_exe_file_vma(mm);
@@ -637,6 +652,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
                        goto again;
                }
        }
+       if (insert && file)
+               uprobe_mmap(insert);
 
        validate_mm(mm);
 
@@ -935,6 +952,19 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 }
 #endif /* CONFIG_PROC_FS */
 
+/*
+ * If a hint addr is less than mmap_min_addr, change the hint to be as
+ * low as possible but still greater than mmap_min_addr.
+ */
+static inline unsigned long round_hint_to_min(unsigned long hint)
+{
+       hint &= PAGE_MASK;
+       if (((void *)hint != NULL) &&
+           (hint < mmap_min_addr))
+               return PAGE_ALIGN(mmap_min_addr);
+       return hint;
+}
+
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
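
For illustration only, here is a minimal stand-alone sketch of the hint clamping performed by the round_hint_to_min() helper added above. The PAGE_SIZE and mmap_min_addr values are assumptions picked for the example (the real limit comes from the vm.mmap_min_addr sysctl), and the code is ordinary userspace C rather than kernel code:

#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* assumed value for the example, not taken from this tree */
static unsigned long mmap_min_addr = 0x10000;

/*
 * Same logic as the helper above: a non-NULL hint below mmap_min_addr
 * is rounded up to the first page at or above mmap_min_addr.
 */
static unsigned long round_hint_to_min(unsigned long hint)
{
        hint &= PAGE_MASK;
        if (hint != 0 && hint < mmap_min_addr)
                return PAGE_ALIGN(mmap_min_addr);
        return hint;
}

int main(void)
{
        printf("%#lx\n", round_hint_to_min(0x1000));   /* 0x10000: clamped up    */
        printf("%#lx\n", round_hint_to_min(0x200123)); /* 0x200000: page-aligned */
        printf("%#lx\n", round_hint_to_min(0));        /* 0: no hint given       */
        return 0;
}
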
@@ -1099,9 +1129,9 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                 * A dummy user value is used because we are not locking
                 * memory so no accounting is necessary
                 */
-               len = ALIGN(len, huge_page_size(&default_hstate));
-               file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-                                               &user, HUGETLB_ANONHUGE_INODE);
+               file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+                                               VM_NORESERVE, &user,
+                                               HUGETLB_ANONHUGE_INODE);
                if (IS_ERR(file))
                        return PTR_ERR(file);
        }
@@ -1235,7 +1265,7 @@ munmap_back:
         */
        if (accountable_mapping(file, vm_flags)) {
                charged = len >> PAGE_SHIFT;
-               if (security_vm_enough_memory(charged))
+               if (security_vm_enough_memory_mm(mm, charged))
                        return -ENOMEM;
                vm_flags |= VM_ACCOUNT;
        }
@@ -1332,6 +1362,11 @@ out:
                        mm->locked_vm += (len >> PAGE_SHIFT);
        } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
                make_pages_present(addr, addr + len);
+
+       if (file && uprobe_mmap(vma))
+               /* matching probes but cannot insert */
+               goto unmap_and_free_vma;
+
        return addr;
 
 unmap_and_free_vma:
@@ -1426,10 +1461,8 @@ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
        /*
         * Is this a new hole at the lowest possible address?
         */
-       if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+       if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
                mm->free_area_cache = addr;
-               mm->cached_hole_size = ~0UL;
-       }
 }
 
 /*
@@ -1444,7 +1477,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 {
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
-       unsigned long addr = addr0;
+       unsigned long addr = addr0, start_addr;
 
        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
@@ -1468,22 +1501,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                mm->free_area_cache = mm->mmap_base;
        }
 
+try_again:
        /* either no address requested or can't fit in requested address hole */
-       addr = mm->free_area_cache;
+       start_addr = addr = mm->free_area_cache;
 
-       /* make sure it can fit in the remaining address space */
-       if (addr > len) {
-               vma = find_vma(mm, addr-len);
-               if (!vma || addr <= vma->vm_start)
-                       /* remember the address as a hint for next time */
-                       return (mm->free_area_cache = addr-len);
-       }
-
-       if (mm->mmap_base < len)
-               goto bottomup;
-
-       addr = mm->mmap_base-len;
+       if (addr < len)
+               goto fail;
 
+       addr -= len;
        do {
                /*
                 * Lookup failure means no vma is above this address,
@@ -1503,7 +1528,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                addr = vma->vm_start-len;
        } while (len < vma->vm_start);
 
-bottomup:
+fail:
+       /*
+        * If the hint left us with no space for the requested
+        * mapping, then try again:
+        *
+        * Note: unlike the bottom-up case, which does a full
+        * linear search, we use find_vma() here, so some
+        * holes may be skipped.
+        */
+       if (start_addr != mm->mmap_base) {
+               mm->free_area_cache = mm->mmap_base;
+               mm->cached_hole_size = 0;
+               goto try_again;
+       }
+
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
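
For illustration only, here is a stand-alone sketch of the try_again/fail retry introduced in this hunk. The vma layout, lengths and base addresses are invented, find_vma() is modelled as a linear scan over a small sorted array instead of the kernel's rbtree walk, and the bottom-up fallback is reduced to an error return:

#include <stdio.h>

struct vma { unsigned long start, end; };       /* occupied range [start, end) */

/* invented layout: two mappings below an assumed mmap_base of 0xa0000 */
static const struct vma vmas[] = {
        { 0x08000, 0x50000 },
        { 0x90000, 0xa0000 },
};
#define NR_VMAS (sizeof(vmas) / sizeof(vmas[0]))

static unsigned long mmap_base = 0xa0000;
static unsigned long free_area_cache = 0x58000; /* stale hint from an earlier search */

/* lowest mapping that ends above addr, or NULL (stands in for find_vma()) */
static const struct vma *find_vma(unsigned long addr)
{
        for (unsigned int i = 0; i < NR_VMAS; i++)
                if (addr < vmas[i].end)
                        return &vmas[i];
        return NULL;
}

static unsigned long topdown_area(unsigned long len)
{
        unsigned long addr, start_addr;
        const struct vma *vma;

try_again:
        /* start from the cached hint, as the patched kernel code does */
        start_addr = addr = free_area_cache;
        if (addr < len)
                goto fail;

        addr -= len;
        do {
                vma = find_vma(addr);
                /* no mapping above addr, or [addr, addr+len) is a free hole */
                if (!vma || addr + len <= vma->start)
                        return free_area_cache = addr;
                /* otherwise try the hole just below this mapping */
                addr = vma->start - len;
        } while (len < vma->start);

fail:
        /* the cached hint left no room: retry once from the very top */
        if (start_addr != mmap_base) {
                free_area_cache = mmap_base;
                goto try_again;
        }
        return -1UL;    /* the kernel would now fall back to the bottom-up search */
}

int main(void)
{
        /*
         * The stale cache at 0x58000 has no 0x30000-byte hole below it, so it
         * is the retry from mmap_base that finds the 0x60000-0x90000 hole.
         */
        printf("%#lx\n", topdown_area(0x30000));
        return 0;
}

With the invented layout, the first pass from the stale free_area_cache fails, the retry from mmap_base succeeds and prints 0x60000, mirroring why the patch resets free_area_cache before giving up.
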
@@ -2180,7 +2219,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;
 
-       if (security_vm_enough_memory(len >> PAGE_SHIFT))
+       if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        /* Can we just expand an old private anonymous mapping? */
@@ -2224,7 +2263,6 @@ void exit_mmap(struct mm_struct *mm)
        struct mmu_gather tlb;
        struct vm_area_struct *vma;
        unsigned long nr_accounted = 0;
-       unsigned long end;
 
        /* mm's last user has gone, and its about to be pulled down */
        mmu_notifier_release(mm);
@@ -2249,11 +2287,11 @@ void exit_mmap(struct mm_struct *mm)
        tlb_gather_mmu(&tlb, mm, 1);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
-       end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+       unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
 
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
-       tlb_finish_mmu(&tlb, 0, end);
+       tlb_finish_mmu(&tlb, 0, -1);
 
        /*
         * Walk the list again, actually closing and freeing it,
@@ -2296,6 +2334,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
        if ((vma->vm_flags & VM_ACCOUNT) &&
             security_vm_enough_memory_mm(mm, vma_pages(vma)))
                return -ENOMEM;
+
+       if (vma->vm_file && uprobe_mmap(vma))
+               return -EINVAL;
+
        vma_link(mm, vma, prev, rb_link, rb_parent);
        return 0;
 }
@@ -2365,6 +2407,10 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                        new_vma->vm_pgoff = pgoff;
                        if (new_vma->vm_file) {
                                get_file(new_vma->vm_file);
+
+                               if (uprobe_mmap(new_vma))
+                                       goto out_free_mempol;
+
                                if (vma->vm_flags & VM_EXECUTABLE)
                                        added_exe_file_vma(mm);
                        }