NFSv4.1: Ensure state manager thread dies on last umount
index 172ceb6edde4df6ff8520cd9951b3bc2ca86b2c2..0cf881dbd94503ab86f81b0fdffc6caebca69dd8 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -33,7 +33,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 #include <linux/key.h>
@@ -55,6 +55,7 @@
 #include <linux/kmod.h>
 #include <linux/fsnotify.h>
 #include <linux/fs_struct.h>
+#include <linux/pipe_fs_i.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -63,6 +64,7 @@
 
 int core_uses_pid;
 char core_pattern[CORENAME_MAX_SIZE] = "core";
+unsigned int core_pipe_limit;
 int suid_dumpable = 0;
 
 /* The maximal length of core_pattern is also specified in sysctl.c */
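
core_pipe_limit caps how many crashing processes may be piped to a userspace helper at once; the knob itself is wired up as a sysctl elsewhere in this series. A minimal probe, assuming the conventional /proc/sys/kernel/core_pipe_limit path:

    /* Hedged sketch: read the core_pipe_limit sysctl added by this
     * series; the /proc path is assumed, not shown in this hunk. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/core_pipe_limit", "r");
            unsigned int limit;

            if (!f) {
                    perror("core_pipe_limit");
                    return 1;
            }
            if (fscanf(f, "%u", &limit) == 1)
                    printf("concurrent piped dumps allowed: %u (0 = unlimited)\n",
                           limit);
            fclose(f);
            return 0;
    }
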
@@ -157,7 +159,22 @@ out:
 
 #ifdef CONFIG_MMU
 
-static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
+{
+       struct mm_struct *mm = current->mm;
+       long diff = (long)(pages - bprm->vma_pages);
+
+       if (!mm || !diff)
+               return;
+
+       bprm->vma_pages = pages;
+
+       down_write(&mm->mmap_sem);
+       mm->total_vm += diff;
+       up_write(&mm->mmap_sem);
+}
+
+struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
 {
        struct page *page;
@@ -179,6 +196,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
                struct rlimit *rlim;
 
+               acct_arg_size(bprm, size / PAGE_SIZE);
+
                /*
                 * We've historically supported up to 32 pages (ARG_MAX)
                 * of argument strings even with small stacks
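
acct_arg_size() charges only the change in the argv area's page count against mm->total_vm, so repeated calls during string copying stay cheap and a final call with 0 undoes the whole charge. A minimal standalone model of that delta accounting, with plain globals standing in for the mm and bprm fields:

    /* Standalone model of acct_arg_size(): only the change in the
     * argv area's page count is charged, and a final call with 0
     * gives everything back. */
    #include <stdio.h>

    static long total_vm;           /* models mm->total_vm    */
    static unsigned long vma_pages; /* models bprm->vma_pages */

    static void acct_arg_size_model(unsigned long pages)
    {
            long diff = (long)(pages - vma_pages);

            if (!diff)
                    return;
            vma_pages = pages;
            total_vm += diff;       /* kernel holds mmap_sem here */
    }

    int main(void)
    {
            acct_arg_size_model(4); /* argv area reached 4 pages */
            acct_arg_size_model(9); /* grew to 9: charge only +5 */
            acct_arg_size_model(0); /* exec failed: uncharge all */
            printf("net total_vm change: %ld\n", total_vm); /* 0 */
            return 0;
    }
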
@@ -245,6 +264,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_STACK_FLAGS;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+       err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
+       if (err)
+               goto err;
+
        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;
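
The new security_file_mmap() call subjects the initial stack page to the same LSM policy as any other mapping, including the low-address floor exposed as vm.mmap_min_addr. Assuming the standard procfs path, the floor the stack must stay above can be read like so:

    /* Hedged sketch: the LSM hook enforces (among other policy) the
     * low-address floor visible as the vm.mmap_min_addr sysctl. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");
            unsigned long min_addr;

            if (f && fscanf(f, "%lu", &min_addr) == 1)
                    printf("lowest mappable address: %#lx\n", min_addr);
            if (f)
                    fclose(f);
            return 0;
    }
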
@@ -267,7 +291,11 @@ static bool valid_arg_len(struct linux_binprm *bprm, long len)
 
 #else
 
-static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
+{
+}
+
+struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
 {
        struct page *page;
@@ -374,6 +402,9 @@ static int count(char __user * __user * argv, int max)
                        argv++;
                        if (i++ >= max)
                                return -E2BIG;
+
+                       if (fatal_signal_pending(current))
+                               return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
@@ -417,6 +448,12 @@ static int copy_strings(int argc, char __user * __user * argv,
                while (len > 0) {
                        int offset, bytes_to_copy;
 
+                       if (fatal_signal_pending(current)) {
+                               ret = -ERESTARTNOHAND;
+                               goto out;
+                       }
+                       cond_resched();
+
                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;
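
The fatal_signal_pending() checks let a killed task abandon a huge argv copy early rather than grinding through it. A rough userspace analogue of the same pattern, with a handler-set flag standing in for the pending-signal test:

    /* Userspace analogue of the fatal_signal_pending() polls above:
     * a long copy loop checks a handler-set flag and bails out
     * promptly instead of running to completion. */
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t got_fatal;

    static void on_term(int sig)
    {
            (void)sig;
            got_fatal = 1;
    }

    int main(void)
    {
            struct sigaction sa = { .sa_handler = on_term };

            sigaction(SIGTERM, &sa, NULL);
            for (long i = 0; i < 1000000; i++) {
                    if (got_fatal) {        /* ~ fatal_signal_pending() */
                            fprintf(stderr, "killed, abandoning copy\n");
                            return 1;
                    }
                    usleep(10);             /* stand-in for one page copy */
            }
            puts("copy finished");
            return 0;
    }
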
@@ -570,6 +607,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
+       unsigned long stack_size;
+       unsigned long stack_expand;
+       unsigned long rlim_stack;
 
 #ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size to 1GB */
@@ -589,6 +629,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
 #else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);
+
+       if (unlikely(stack_top < mmap_min_addr) ||
+           unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+               return -ENOMEM;
+
        stack_shift = vma->vm_end - stack_top;
 
        bprm->p -= stack_shift;
@@ -622,16 +667,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
-               if (ret) {
-                       up_write(&mm->mmap_sem);
-                       return ret;
-               }
+               if (ret)
+                       goto out_unlock;
        }
 
+       stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+       stack_size = vma->vm_end - vma->vm_start;
+       /*
+        * Align this down to a page boundary as expand_stack
+        * will align it up.
+        */
+       rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
 #ifdef CONFIG_STACK_GROWSUP
-       stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+       if (stack_size + stack_expand > rlim_stack)
+               stack_base = vma->vm_start + rlim_stack;
+       else
+               stack_base = vma->vm_end + stack_expand;
 #else
-       stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+       if (stack_size + stack_expand > rlim_stack)
+               stack_base = vma->vm_end - rlim_stack;
+       else
+               stack_base = vma->vm_start - stack_expand;
 #endif
        ret = expand_stack(vma, stack_base);
        if (ret)
@@ -639,7 +695,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
 
 out_unlock:
        up_write(&mm->mmap_sem);
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(setup_arg_pages);
 
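
The clamping above keeps the initial expand_stack() from overshooting RLIMIT_STACK. A standalone rerun of the grow-down arithmetic; PAGE_SIZE and EXTRA_STACK_VM_PAGES (20 in kernels of this era) are assumptions for illustration, as are the addresses:

    /* Illustrative rerun of the grow-down stack_base clamping. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define EXTRA_STACK_VM_PAGES 20UL   /* assumed era default */

    int main(void)
    {
            unsigned long vm_start = 0x7f0000000000UL;
            unsigned long vm_end   = vm_start + 8 * PAGE_SIZE;
            unsigned long stack_size   = vm_end - vm_start;
            unsigned long stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
            unsigned long rlim_stack   = 16 * PAGE_SIZE;  /* tiny rlimit */
            unsigned long stack_base;

            if (stack_size + stack_expand > rlim_stack)
                    stack_base = vm_end - rlim_stack;     /* clamped */
            else
                    stack_base = vm_start - stack_expand;

            printf("expand down to %#lx: %lu pages, rlimit %lu pages\n",
                   stack_base, (vm_end - stack_base) / PAGE_SIZE,
                   rlim_stack / PAGE_SIZE);
            return 0;
    }
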
@@ -845,6 +901,9 @@ static int de_thread(struct task_struct *tsk)
        sig->notify_count = 0;
 
 no_thread_group:
+       if (current->mm)
+               setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
+
        exit_itimers(sig);
        flush_itimer_signals();
 
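
The setmax_mm_hiwater_rss() call records the old image's peak RSS before it is torn down; this bookkeeping is what surfaces as ru_maxrss in getrusage() (reported in kilobytes on Linux). A quick self-inspection:

    /* The maxrss captured above is what getrusage() reports. */
    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rusage ru;

            if (getrusage(RUSAGE_SELF, &ru) == 0)
                    printf("peak RSS so far: %ld kB\n", ru.ru_maxrss);
            return 0;
    }
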
@@ -923,14 +982,12 @@ void set_task_comm(struct task_struct *tsk, char *buf)
        task_lock(tsk);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
-       perf_counter_comm(tsk);
+       perf_event_comm(tsk);
 }
 
 int flush_old_exec(struct linux_binprm * bprm)
 {
-       char * name;
-       int i, ch, retval;
-       char tcomm[sizeof(current->comm)];
+       int retval;
 
        /*
         * Make sure we have a private signal table and that
@@ -945,12 +1002,32 @@ int flush_old_exec(struct linux_binprm * bprm)
        /*
         * Release all of the old mmap stuff
         */
+       acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;
 
        bprm->mm = NULL;                /* We're using it now */
 
+       current->flags &= ~PF_RANDOMIZE;
+       flush_thread();
+       current->personality &= ~bprm->per_clear;
+
+       return 0;
+
+out:
+       return retval;
+}
+EXPORT_SYMBOL(flush_old_exec);
+
+void setup_new_exec(struct linux_binprm * bprm)
+{
+       int i, ch;
+       char * name;
+       char tcomm[sizeof(current->comm)];
+
+       arch_pick_mmap_layout(current->mm);
+
        /* This is the point of no return */
        current->sas_ss_sp = current->sas_ss_size = 0;
 
@@ -972,9 +1049,6 @@ int flush_old_exec(struct linux_binprm * bprm)
        tcomm[i] = '\0';
        set_task_comm(current, tcomm);
 
-       current->flags &= ~PF_RANDOMIZE;
-       flush_thread();
-
        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc
@@ -990,14 +1064,12 @@ int flush_old_exec(struct linux_binprm * bprm)
                set_dumpable(current->mm, suid_dumpable);
        }
 
-       current->personality &= ~bprm->per_clear;
-
        /*
         * Flush performance counters when crossing a
         * security domain:
         */
        if (!get_dumpable(current->mm))
-               perf_counter_exit_task(current);
+               perf_event_exit_task(current);
 
        /* An exec changes our domain. We are no longer part of the thread
           group */
@@ -1006,14 +1078,8 @@ int flush_old_exec(struct linux_binprm * bprm)
                        
        flush_signal_handlers(current, 0);
        flush_old_files(current->files);
-
-       return 0;
-
-out:
-       return retval;
 }
-
-EXPORT_SYMBOL(flush_old_exec);
+EXPORT_SYMBOL(setup_new_exec);
 
 /*
  * Prepare credentials and lock ->cred_guard_mutex.
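
setup_new_exec() truncates the new task name to sizeof(current->comm), 16 bytes including the NUL. The same cap is visible from userspace through prctl():

    /* The comm set up by setup_new_exec() is capped at 16 bytes
     * including the NUL; the field is reachable via prctl(). */
    #include <stdio.h>
    #include <string.h>
    #include <sys/prctl.h>

    int main(void)
    {
            char name[16];

            prctl(PR_SET_NAME, "a-deliberately-long-name");
            prctl(PR_GET_NAME, name);
            printf("comm after truncation: \"%s\" (%zu chars)\n",
                   name, strlen(name));    /* at most 15 chars */
            return 0;
    }
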
@@ -1364,8 +1430,10 @@ int do_execve(char * filename,
        return retval;
 
 out:
-       if (bprm->mm)
-               mmput (bprm->mm);
+       if (bprm->mm) {
+               acct_arg_size(bprm, 0);
+               mmput(bprm->mm);
+       }
 
 out_file:
        if (bprm->file) {
@@ -1388,18 +1456,16 @@ out_ret:
        return retval;
 }
 
-int set_binfmt(struct linux_binfmt *new)
+void set_binfmt(struct linux_binfmt *new)
 {
-       struct linux_binfmt *old = current->binfmt;
+       struct mm_struct *mm = current->mm;
 
-       if (new) {
-               if (!try_module_get(new->module))
-                       return -1;
-       }
-       current->binfmt = new;
-       if (old)
-               module_put(old->module);
-       return 0;
+       if (mm->binfmt)
+               module_put(mm->binfmt->module);
+
+       mm->binfmt = new;
+       if (new)
+               __module_get(new->module);
 }
 
 EXPORT_SYMBOL(set_binfmt);
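
set_binfmt() now hangs the binfmt off the mm rather than the task, and takes an unconditional __module_get() since the format is known to be live at this point. A toy model of the refcount handoff, with plain counters standing in for module references:

    /* Toy model: the mm pins exactly one binfmt at a time. */
    #include <stdio.h>

    struct binfmt { const char *name; int refs; };

    static struct binfmt *mm_binfmt;        /* models mm->binfmt */

    static void set_binfmt_model(struct binfmt *new)
    {
            if (mm_binfmt)
                    mm_binfmt->refs--;      /* module_put(old)   */
            mm_binfmt = new;
            if (new)
                    new->refs++;            /* __module_get(new) */
    }

    int main(void)
    {
            struct binfmt script = { "script", 0 }, elf = { "elf", 0 };

            set_binfmt_model(&script);      /* interpreter line seen   */
            set_binfmt_model(&elf);         /* script hands off to ELF */
            printf("elf=%d script=%d\n", elf.refs, script.refs); /* 1 0 */
            return 0;
    }
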
@@ -1723,6 +1789,29 @@ int get_dumpable(struct mm_struct *mm)
        return (ret >= 2) ? 2 : ret;
 }
 
+static void wait_for_dump_helpers(struct file *file)
+{
+       struct pipe_inode_info *pipe;
+
+       pipe = file->f_path.dentry->d_inode->i_pipe;
+
+       pipe_lock(pipe);
+       pipe->readers++;
+       pipe->writers--;
+
+       while ((pipe->readers > 1) && (!signal_pending(current))) {
+               wake_up_interruptible_sync(&pipe->wait);
+               kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
+               pipe_wait(pipe);
+       }
+
+       pipe->readers--;
+       pipe->writers++;
+       pipe_unlock(pipe);
+}
+
 void do_coredump(long signr, int exit_code, struct pt_regs *regs)
 {
        struct core_state core_state;
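
wait_for_dump_helpers() poses as an extra pipe reader so the dumping task can sleep until the helper drains the pipe and exits (pipe->readers falls back to 1). A userspace sketch of the same idea, with waitpid() playing the waiting role:

    /* Sketch of what wait_for_dump_helpers() buys: after feeding the
     * helper, wait for it to finish with the pipe instead of tearing
     * down immediately. */
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            int fds[2];
            pid_t pid;

            if (pipe(fds) < 0)
                    return 1;
            pid = fork();
            if (pid == 0) {                 /* the "dump helper" */
                    char buf[64];
                    close(fds[1]);
                    while (read(fds[0], buf, sizeof(buf)) > 0)
                            ;               /* consume the "core" */
                    _exit(0);
            }
            close(fds[0]);
            if (write(fds[1], "core data\n", 10) < 0)
                    return 1;
            close(fds[1]);                  /* EOF for the helper */
            waitpid(pid, NULL, 0);          /* ~ wait_for_dump_helpers() */
            puts("helper done, safe to tear down");
            return 0;
    }
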
@@ -1739,11 +1828,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
        unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
        char **helper_argv = NULL;
        int helper_argc = 0;
-       char *delimit;
+       int dump_count = 0;
+       static atomic_t core_dump_count = ATOMIC_INIT(0);
 
        audit_core_dumps(signr);
 
-       binfmt = current->binfmt;
+       binfmt = mm->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
 
@@ -1794,54 +1884,63 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
        lock_kernel();
        ispipe = format_corename(corename, signr);
        unlock_kernel();
-       /*
-        * Don't bother to check the RLIMIT_CORE value if core_pattern points
-        * to a pipe.  Since we're not writing directly to the filesystem
-        * RLIMIT_CORE doesn't really apply, as no actual core file will be
-        * created unless the pipe reader choses to write out the core file
-        * at which point file size limits and permissions will be imposed
-        * as it does with any other process
-        */
+
        if ((!ispipe) && (core_limit < binfmt->min_coredump))
                goto fail_unlock;
 
        if (ispipe) {
+               if (core_limit == 0) {
+                       /*
+                        * Normally core limits are irrelevant to pipes, since
+                        * we're not writing to the file system, but we use
+                        * core_limit of 0 here as a special value. Any
+                        * non-zero limit gets set to RLIM_INFINITY below, but
+                        * a limit of 0 skips the dump.  This is a consistent
+                        * way to catch recursive crashes.  We can still crash
+                        * if the core_pattern binary sets RLIMIT_CORE != 0
+                        * but runs as root, and can do lots of stupid things.
+                        * Note that we use task_tgid_vnr here to grab the pid
+                        * of the process group leader.  That way we get the
+                        * right pid if a thread in a multi-threaded
+                        * core_pattern process dies.
+                        */
+                       printk(KERN_WARNING
+                               "Process %d(%s) has RLIMIT_CORE set to 0\n",
+                               task_tgid_vnr(current), current->comm);
+                       printk(KERN_WARNING "Aborting core\n");
+                       goto fail_unlock;
+               }
+
+               dump_count = atomic_inc_return(&core_dump_count);
+               if (core_pipe_limit && (core_pipe_limit < dump_count)) {
+                       printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
+                              task_tgid_vnr(current), current->comm);
+                       printk(KERN_WARNING "Skipping core dump\n");
+                       goto fail_dropcount;
+               }
+
                helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
                if (!helper_argv) {
                        printk(KERN_WARNING "%s failed to allocate memory\n",
                               __func__);
-                       goto fail_unlock;
-               }
-               /* Terminate the string before the first option */
-               delimit = strchr(corename, ' ');
-               if (delimit)
-                       *delimit = '\0';
-               delimit = strrchr(helper_argv[0], '/');
-               if (delimit)
-                       delimit++;
-               else
-                       delimit = helper_argv[0];
-               if (!strcmp(delimit, current->comm)) {
-                       printk(KERN_NOTICE "Recursive core dump detected, "
-                                       "aborting\n");
-                       goto fail_unlock;
+                       goto fail_dropcount;
                }
 
                core_limit = RLIM_INFINITY;
 
                /* SIGPIPE can happen, but it's just never processed */
-               if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
+               if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
                                &file)) {
                        printk(KERN_INFO "Core dump to %s pipe failed\n",
                               corename);
-                       goto fail_unlock;
+                       goto fail_dropcount;
                }
        } else
                file = filp_open(corename,
                                 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
                                 0600);
        if (IS_ERR(file))
-               goto fail_unlock;
+               goto fail_dropcount;
        inode = file->f_path.dentry->d_inode;
        if (inode->i_nlink > 1)
                goto close_fail;        /* multiple links - don't dump */
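
Together with the core_limit == 0 special case above, RLIMIT_CORE = 0 now suppresses piped dumps as well, which is how a crashing core_pattern helper avoids recursing. Opting out looks the same as for file dumps:

    /* A helper (or any process) can opt out of dumping entirely. */
    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl = { .rlim_cur = 0, .rlim_max = 0 };

            if (setrlimit(RLIMIT_CORE, &rl) == 0)
                    puts("RLIMIT_CORE = 0: no core file, no pipe helper");
            return 0;
    }
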
@@ -1855,8 +1954,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
        /*
         * Dont allow local users get cute and trick others to coredump
         * into their pre-created files:
+        * Note, this is not relevant for pipes
         */
-       if (inode->i_uid != current_fsuid())
+       if (!ispipe && (inode->i_uid != current_fsuid()))
                goto close_fail;
        if (!file->f_op)
                goto close_fail;
@@ -1870,7 +1970,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
        if (retval)
                current->signal->group_exit_code |= 0x80;
 close_fail:
+       if (ispipe && core_pipe_limit)
+               wait_for_dump_helpers(file);
        filp_close(file, NULL);
+fail_dropcount:
+       if (dump_count)
+               atomic_dec(&core_dump_count);
 fail_unlock:
        if (helper_argv)
                argv_free(helper_argv);