uprobes: Suppress uprobe_munmap() from mmput()
firefly-linux-kernel-4.4.55.git: kernel/events/uprobes.c
index 4e0db3496d706ebcd0f2cfc1186835a22a7126fe..9db9cdf8ff346d00cb682068544ae3307d3b5116 100644
@@ -44,6 +44,23 @@ static DEFINE_SPINLOCK(uprobes_treelock);    /* serialize rbtree access */
 
 #define UPROBES_HASH_SZ        13
 
+/*
+ * We need separate register/unregister and mmap/munmap lock hashes because
+ * of mmap_sem nesting.
+ *
+ * uprobe_register() needs to install probes on (potentially) all processes
+ * and thus needs to acquire multiple mmap_sems (consecutively, not
+ * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
+ * for the particular process doing the mmap.
+ *
+ * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
+ * because of lock order against i_mmap_mutex. This means there's a hole in
+ * the register vma iteration where a mmap() can happen.
+ *
+ * Thus uprobe_register() can race with uprobe_mmap() and we can try and
+ * install a probe where one is already installed.
+ */
+
 /* serialize (un)register */
 static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
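
The comment above is the key to the locking in this file: uprobe_register() has to visit every mm that maps the inode, taking each mmap_sem in turn, while uprobe_mmap()/uprobe_munmap() already hold the mmap_sem of the process doing the map, so the two paths are serialized against each other through fixed-size arrays of mutexes hashed by the probed inode rather than through mmap_sem itself. A minimal user-space sketch of that lock-hashing idea; the bucket count matches UPROBES_HASH_SZ, but the names are invented for the demo and are not the kernel's helpers:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_HASH_SZ 13

/* One mutex per bucket; an object's address picks the bucket, so work on
 * the same inode always contends on the same lock while unrelated inodes
 * usually proceed in parallel. */
static pthread_mutex_t demo_mutex[DEMO_HASH_SZ];

static pthread_mutex_t *demo_hash(const void *inode)
{
	return &demo_mutex[(uintptr_t)inode % DEMO_HASH_SZ];
}

int main(void)
{
	int a, b;	/* stand-ins for two inodes */

	for (int i = 0; i < DEMO_HASH_SZ; i++)
		pthread_mutex_init(&demo_mutex[i], NULL);

	printf("inode A -> bucket %td, inode B -> bucket %td\n",
	       demo_hash(&a) - demo_mutex, demo_hash(&b) - demo_mutex);
	return 0;
}

Because register_for_each_vma() must drop and retake mmap_sem (lock order against i_mmap_mutex), a concurrent mmap() can slip into that window; that is why both install paths later in this patch treat -EEXIST from install_breakpoint() as success.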
 
@@ -110,25 +127,27 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
  * based on replace_page in mm/ksm.c
  *
  * @vma:      vma that holds the pte pointing to page
+ * @addr:     address the old @page is mapped at
  * @page:     the cowed page we are replacing by kpage
  * @kpage:    the modified page we replace page by
  *
  * Returns 0 on success, -EFAULT on failure.
  */
-static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
+static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
+                               struct page *page, struct page *kpage)
 {
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long addr;
        spinlock_t *ptl;
        pte_t *ptep;
+       int err;
 
-       addr = page_address_in_vma(page, vma);
-       if (addr == -EFAULT)
-               return -EFAULT;
+       /* freeze PageSwapCache() for try_to_free_swap() below */
+       lock_page(page);
 
+       err = -EAGAIN;
        ptep = page_check_address(page, mm, addr, &ptl, 0);
        if (!ptep)
-               return -EAGAIN;
+               goto unlock;
 
        get_page(kpage);
        page_add_new_anon_rmap(kpage, vma, addr);
@@ -148,7 +167,10 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct
        put_page(page);
        pte_unmap_unlock(ptep, ptl);
 
-       return 0;
+       err = 0;
+ unlock:
+       unlock_page(page);
+       return err;
 }
 
 /**
@@ -189,78 +211,42 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
                        unsigned long vaddr, uprobe_opcode_t opcode)
 {
        struct page *old_page, *new_page;
-       struct address_space *mapping;
        void *vaddr_old, *vaddr_new;
        struct vm_area_struct *vma;
-       struct uprobe *uprobe;
-       unsigned long pgoff;
-       loff_t addr;
        int ret;
+
 retry:
        /* Read the page with vaddr into memory */
        ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
        if (ret <= 0)
                return ret;
 
-       ret = -EINVAL;
-
-       /*
-        * We are interested in text pages only. Our pages of interest
-        * should be mapped for read and execute only. We desist from
-        * adding probes in write mapped pages since the breakpoints
-        * might end up in the file copy.
-        */
-       if (!valid_vma(vma, is_swbp_insn(&opcode)))
-               goto put_out;
-
-       uprobe = container_of(auprobe, struct uprobe, arch);
-       mapping = uprobe->inode->i_mapping;
-       if (mapping != vma->vm_file->f_mapping)
-               goto put_out;
-
-       addr = vma_address(vma, uprobe->offset);
-       if (vaddr != (unsigned long)addr)
-               goto put_out;
-
        ret = -ENOMEM;
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
        if (!new_page)
-               goto put_out;
+               goto put_old;
 
        __SetPageUptodate(new_page);
 
-       /*
-        * lock page will serialize against do_wp_page()'s
-        * PageAnon() handling
-        */
-       lock_page(old_page);
        /* copy the page now that we've got it stable */
        vaddr_old = kmap_atomic(old_page);
        vaddr_new = kmap_atomic(new_page);
 
        memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
-
-       /* poke the new insn in, ASSUMES we don't cross page boundary */
-       pgoff = (vaddr & ~PAGE_MASK);
-       BUG_ON(pgoff + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-       memcpy(vaddr_new + pgoff, &opcode, UPROBE_SWBP_INSN_SIZE);
+       memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);
 
        kunmap_atomic(vaddr_new);
        kunmap_atomic(vaddr_old);
 
        ret = anon_vma_prepare(vma);
        if (ret)
-               goto unlock_out;
+               goto put_new;
 
-       lock_page(new_page);
-       ret = __replace_page(vma, old_page, new_page);
-       unlock_page(new_page);
+       ret = __replace_page(vma, vaddr, old_page, new_page);
 
-unlock_out:
-       unlock_page(old_page);
+put_new:
        page_cache_release(new_page);
-
-put_out:
+put_old:
        put_page(old_page);
 
        if (unlikely(ret == -EAGAIN))
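
The write_opcode() cleanup folds the in-page offset computation into the memcpy() and relocates the page-boundary assertion to install_breakpoint() (see the BUG_ON in a later hunk), leaving this function to get a stable copy of the page and retry on -EAGAIN. The offset arithmetic, as a small runnable illustration; the address is made up, and the 1-byte breakpoint size corresponds to x86's int3:

#include <stdio.h>

#define DEMO_PAGE_SIZE		4096UL
#define DEMO_PAGE_MASK		(~(DEMO_PAGE_SIZE - 1))
#define DEMO_SWBP_INSN_SIZE	1UL	/* x86 int3 is a single byte */

int main(void)
{
	unsigned long vaddr = 0x400a7cUL;	/* hypothetical probed address */
	unsigned long off   = vaddr & ~DEMO_PAGE_MASK;

	/* write_opcode() pokes the opcode at this offset inside new_page;
	 * the boundary check keeps the write from spilling past the page. */
	printf("in-page offset %#lx, fits in page: %s\n", off,
	       off + DEMO_SWBP_INSN_SIZE <= DEMO_PAGE_SIZE ? "yes" : "no");
	return 0;
}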
@@ -339,7 +325,9 @@ out:
 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
 {
        int result;
-
+       /*
+        * See the comment near uprobes_hash().
+        */
        result = is_swbp_at_addr(mm, vaddr);
        if (result == 1)
                return -EEXIST;
@@ -504,7 +492,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
        uprobe->inode = igrab(inode);
        uprobe->offset = offset;
        init_rwsem(&uprobe->consumer_rwsem);
-       INIT_LIST_HEAD(&uprobe->pending_list);
 
        /* add to uprobes_tree, sorted on inode:offset */
        cur_uprobe = insert_uprobe(uprobe);
@@ -572,14 +559,13 @@ static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
 }
 
 static int
-__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
-                       unsigned long nbytes, unsigned long offset)
+__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
+                       unsigned long nbytes, loff_t offset)
 {
-       struct file *filp = vma->vm_file;
        struct page *page;
        void *vaddr;
-       unsigned long off1;
-       unsigned long idx;
+       unsigned long off;
+       pgoff_t idx;
 
        if (!filp)
                return -EINVAL;
@@ -587,8 +573,8 @@ __copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
        if (!mapping->a_ops->readpage)
                return -EIO;
 
-       idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
-       off1 = offset &= ~PAGE_MASK;
+       idx = offset >> PAGE_CACHE_SHIFT;
+       off = offset & ~PAGE_MASK;
 
        /*
         * Ensure that the page that has the original instruction is
@@ -599,22 +585,20 @@ __copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
                return PTR_ERR(page);
 
        vaddr = kmap_atomic(page);
-       memcpy(insn, vaddr + off1, nbytes);
+       memcpy(insn, vaddr + off, nbytes);
        kunmap_atomic(vaddr);
        page_cache_release(page);
 
        return 0;
 }
 
-static int
-copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
+static int copy_insn(struct uprobe *uprobe, struct file *filp)
 {
        struct address_space *mapping;
        unsigned long nbytes;
        int bytes;
 
-       addr &= ~PAGE_MASK;
-       nbytes = PAGE_SIZE - addr;
+       nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
        mapping = uprobe->inode->i_mapping;
 
        /* Instruction at end of binary; copy only available bytes */
@@ -625,13 +609,13 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
 
        /* Instruction at the page-boundary; copy bytes in second page */
        if (nbytes < bytes) {
-               if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
-                               bytes - nbytes, uprobe->offset + nbytes))
-                       return -ENOMEM;
-
+               int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
+                               bytes - nbytes, uprobe->offset + nbytes);
+               if (err)
+                       return err;
                bytes = nbytes;
        }
-       return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
+       return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
 }
 
 /*
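
copy_insn() now works from uprobe->offset and the struct file alone, and the page-straddling branch propagates __copy_insn()'s actual error instead of a blanket -ENOMEM. How an instruction slot that crosses a page boundary is split between two reads, shown with made-up numbers (the 16 bytes here stand in for the x86 instruction slot size):

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long offset = 0x1ffeUL;	/* hypothetical uprobe->offset */
	unsigned long bytes  = 16UL;		/* hypothetical instruction slot */
	unsigned long nbytes = DEMO_PAGE_SIZE - (offset & (DEMO_PAGE_SIZE - 1));

	/* Mirrors copy_insn(): the first nbytes come from the page that holds
	 * 'offset', anything beyond that comes from the following page. */
	if (nbytes < bytes)
		printf("read %lu bytes at %#lx, then %lu bytes from the next page\n",
		       nbytes, offset, bytes - nbytes);
	else
		printf("read all %lu bytes from a single page\n", bytes);
	return 0;
}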
@@ -659,9 +643,8 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
  */
 static int
 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
-                       struct vm_area_struct *vma, loff_t vaddr)
+                       struct vm_area_struct *vma, unsigned long vaddr)
 {
-       unsigned long addr;
        int ret;
 
        /*
@@ -674,20 +657,22 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
        if (!uprobe->consumers)
                return -EEXIST;
 
-       addr = (unsigned long)vaddr;
-
        if (!(uprobe->flags & UPROBE_COPY_INSN)) {
-               ret = copy_insn(uprobe, vma, addr);
+               ret = copy_insn(uprobe, vma->vm_file);
                if (ret)
                        return ret;
 
                if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
                        return -ENOTSUPP;
 
-               ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, addr);
+               ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
                if (ret)
                        return ret;
 
+               /* write_opcode() assumes we don't cross page boundary */
+               BUG_ON((uprobe->offset & ~PAGE_MASK) +
+                               UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+
                uprobe->flags |= UPROBE_COPY_INSN;
        }
 
@@ -700,7 +685,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
         * Hence increment before and decrement on failure.
         */
        atomic_inc(&mm->uprobes_state.count);
-       ret = set_swbp(&uprobe->arch, mm, addr);
+       ret = set_swbp(&uprobe->arch, mm, vaddr);
        if (ret)
                atomic_dec(&mm->uprobes_state.count);
 
@@ -708,9 +693,9 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 }
 
 static void
-remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
+remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-       if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
+       if (!set_orig_insn(&uprobe->arch, mm, vaddr, true))
                atomic_dec(&mm->uprobes_state.count);
 }
 
@@ -734,7 +719,7 @@ static void delete_uprobe(struct uprobe *uprobe)
 struct map_info {
        struct map_info *next;
        struct mm_struct *mm;
-       loff_t vaddr;
+       unsigned long vaddr;
 };
 
 static inline struct map_info *free_map_info(struct map_info *info)
@@ -761,6 +746,16 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
                if (!valid_vma(vma, is_register))
                        continue;
 
+               if (!prev && !more) {
+                       /*
+                        * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
+                        * reclaim. This is optimistic, no harm done if it fails.
+                        */
+                       prev = kmalloc(sizeof(struct map_info),
+                                       GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
+                       if (prev)
+                               prev->next = NULL;
+               }
                if (!prev) {
                        more++;
                        continue;
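
The optimistic kmalloc() is the point of this hunk: build_map_info() runs under i_mmap_mutex, and a sleeping allocation could recurse into reclaim and back onto that same mutex, so inside the lock only a GFP_NOWAIT attempt is made and any shortfall is counted in 'more' to be made up once the lock is dropped. A user-space analogue of that two-pass pattern; all names are invented for the demo, and unlike the real code it simply discards each consumed node:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct info { struct info *next; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;	/* i_mmap_mutex stand-in */
static struct info *free_list;

int main(void)
{
	int nr_vmas = 5;	/* pretend five vmas map the inode */
	int more;

again:
	more = 0;
	pthread_mutex_lock(&tree_lock);
	for (int i = 0; i < nr_vmas; i++) {
		struct info *cur = free_list;

		if (!cur) {		/* nothing preallocated: defer, never block here */
			more++;
			continue;
		}
		free_list = cur->next;
		/* ... record this vma's mm and vaddr in cur (elided) ... */
		free(cur);		/* demo only: discard the consumed node */
	}
	pthread_mutex_unlock(&tree_lock);

	if (more) {			/* the lock is dropped, sleeping allocations are safe */
		printf("preallocating %d nodes and rescanning\n", more);
		while (more--) {
			struct info *cur = malloc(sizeof(*cur));

			if (!cur)
				return 1;
			cur->next = free_list;
			free_list = cur;
		}
		goto again;
	}
	return 0;
}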
@@ -818,7 +813,6 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
        while (info) {
                struct mm_struct *mm = info->mm;
                struct vm_area_struct *vma;
-               loff_t vaddr;
 
                if (err)
                        goto free;
@@ -828,13 +822,16 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
                if (!vma || !valid_vma(vma, is_register))
                        goto unlock;
 
-               vaddr = vma_address(vma, uprobe->offset);
                if (vma->vm_file->f_mapping->host != uprobe->inode ||
-                                               vaddr != info->vaddr)
+                   vma_address(vma, uprobe->offset) != info->vaddr)
                        goto unlock;
 
                if (is_register) {
                        err = install_breakpoint(uprobe, mm, vma, info->vaddr);
+                       /*
+                        * We can race against uprobe_mmap(), see the
+                        * comment near uprobes_hash().
+                        */
                        if (err == -EEXIST)
                                err = 0;
                } else {
@@ -1032,11 +1029,8 @@ int uprobe_mmap(struct vm_area_struct *vma)
        count = 0;
 
        list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-               loff_t vaddr;
-
-               list_del(&uprobe->pending_list);
                if (!ret) {
-                       vaddr = vma_address(vma, uprobe->offset);
+                       loff_t vaddr = vma_address(vma, uprobe->offset);
 
                        if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
                                put_uprobe(uprobe);
@@ -1044,8 +1038,10 @@ int uprobe_mmap(struct vm_area_struct *vma)
                        }
 
                        ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
-
-                       /* Ignore double add: */
+                       /*
+                        * We can race against uprobe_register(), see the
+                        * comment near uprobes_hash().
+                        */
                        if (ret == -EEXIST) {
                                ret = 0;
 
@@ -1086,6 +1082,9 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
        if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
                return;
 
+       if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
+               return;
+
        if (!atomic_read(&vma->vm_mm->uprobes_state.count))
                return;
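
This early return is the change named in the subject line. On the final mmput(), mm_users has already dropped to zero by the time exit_mmap() tears down the vmas; nothing can execute in that mm anymore, so the probe-list walk and the uprobes_state.count bookkeeping are skipped for a dying mm. A toy user-space model of the guard; every name in it is invented for the illustration:

#include <stdatomic.h>
#include <stdio.h>

/* Toy model: once the last user reference is gone (mm_users == 0), the
 * teardown-time "munmap" notification skips all breakpoint accounting. */
struct toy_mm { atomic_int mm_users; };

static void toy_uprobe_munmap(struct toy_mm *mm)
{
	if (!atomic_load(&mm->mm_users)) {	/* called from the final put? */
		puts("dying mm: skip breakpoint accounting");
		return;
	}
	puts("live mm: update breakpoint accounting");
}

static void toy_mmput(struct toy_mm *mm)
{
	if (atomic_fetch_sub(&mm->mm_users, 1) == 1)
		toy_uprobe_munmap(mm);	/* exit_mmap()-style teardown, count is already 0 */
}

int main(void)
{
	struct toy_mm mm = { .mm_users = 1 };

	toy_uprobe_munmap(&mm);	/* an ordinary munmap() while the mm is live */
	toy_mmput(&mm);		/* final reference dropped: teardown path */
	return 0;
}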
 
@@ -1098,10 +1097,7 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
        build_probe_list(inode, &tmp_list);
 
        list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-               loff_t vaddr;
-
-               list_del(&uprobe->pending_list);
-               vaddr = vma_address(vma, uprobe->offset);
+               loff_t vaddr = vma_address(vma, uprobe->offset);
 
                if (vaddr >= start && vaddr < end) {
                        /*
@@ -1378,7 +1374,6 @@ static struct uprobe_task *add_utask(void)
        if (unlikely(!utask))
                return NULL;
 
-       utask->active_uprobe = NULL;
        current->utask = utask;
        return utask;
 }