#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;

/* Scratch buffer used while expanding core_pattern into a file name. */
struct core_name {
	char *corename;
	int used, size;
};
static atomic_t call_count = ATOMIC_INIT(1);

/* The maximal length of core_pattern is also specified in sysctl.c */
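/*
 * Grow cn->corename. Every call bumps the global call_count, so each
 * expansion adds another CORENAME_MAX_SIZE-sized step. On allocation
 * failure the old buffer is freed and -ENOMEM is returned.
 */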
static int expand_corename(struct core_name *cn)
{
	char *old_corename = cn->corename;

	cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
	cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);

	if (!cn->corename) {
		kfree(old_corename);
		return -ENOMEM;
	}

	return 0;
}
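/*
 * printf()-style append into cn->corename. The first vsnprintf() call,
 * with a NULL buffer, only measures the space required; if the result
 * (plus the trailing NUL) would not fit, the buffer is expanded before
 * the second vsnprintf() actually writes the text.
 */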
static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	char *cur;
	int need;
	int ret;
	va_list arg;

	va_start(arg, fmt);
	need = vsnprintf(NULL, 0, fmt, arg);	/* sizing pass only */
	va_end(arg);

	if (likely(need < cn->size - cn->used - 1))
		goto out_printf;

	ret = expand_corename(cn);
	if (ret)
		return ret;

out_printf:
	cur = cn->corename + cn->used;
	va_start(arg, fmt);
	vsnprintf(cur, need + 1, fmt, arg);
	va_end(arg);
	cn->used += need;
	return 0;
}
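/*
 * Replace '/' with '!' in an expanded component (hostname, comm, exe
 * path) so untrusted strings cannot smuggle path separators into the
 * core file name.
 */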
static void cn_escape(char *str)
{
	for (; *str; str++)
		if (*str == '/')
			*str = '!';
}
static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file) {
		char *commstart = cn->corename + cn->used;
		ret = cn_printf(cn, "%s (path unknown)", current->comm);
		cn_escape(commstart);
		return ret;
	}

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	cn_escape(path);

	ret = cn_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	if (!cn->corename)
		return -ENOMEM;
	cn->used = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%d", cred->uid);
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%d", cred->gid);
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%ld", signr);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h': {
				char *namestart = cn->corename + cn->used;
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				cn_escape(namestart);
				break;
			}
			/* executable */
			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}
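/*
 * Illustrative expansions (examples, not from the source): with
 * core_pattern "core.%e.%p.%s", a crash of "a.out" (pid 1234) on
 * SIGSEGV yields "core.a.out.1234.11". A pattern starting with '|',
 * such as "|/path/helper %p", makes format_corename() return 1 so the
 * remainder is treated as a user-mode helper command instead of a
 * file name.
 */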
static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}
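/*
 * Find and kill every other task that shares the dumping mm, counting
 * how many of them will report to the dumper; returns that count, or a
 * negative error if a group exit already raced with us.
 */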
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
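/*
 * Quiesce the process for dumping: zap all threads sharing the mm, wait
 * until each has checked in from exit_mm() (the ->startup completion),
 * then wait for them to actually go inactive so their extended register
 * state is safely in memory.
 */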
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion(&core_state->startup);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}
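/*
 * Release the threads parked in exit_mm(): clear each ->task pointer
 * (the barrier in the loop keeps that store ordered after the ->next
 * load) and wake the thread, then drop mm->core_state.
 */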
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}
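/*
 * Wait until the user-mode helper closes the read side of the core pipe
 * (pipe->readers drops back to our temporary reference) or a signal
 * arrives. Only called when core_pipe_limit is non-zero.
 */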
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file->f_path.dentry->d_inode->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}
/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or a
 * negative error code on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	replace_fd(0, files[0], 0);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return 0;
}
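/*
 * Illustrative setup (example values, not from the source): an
 * administrator pipes cores to a helper and bounds concurrent dumps:
 *
 *	echo "|/usr/local/bin/core-catcher %p %s" > /proc/sys/kernel/core_pattern
 *	echo 4 > /proc/sys/kernel/core_pipe_limit
 *
 * umh_pipe_setup() above hands that helper the read end of the pipe as
 * fd 0; do_coredump() below writes the image into the other end via
 * binfmt->core_dump().
 */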
void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	struct files_struct *displaced;
	bool need_nonrelative = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.signr = signr,
		.regs = regs,
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(signr);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
		/* Setuid core dump mode */
		flag = O_EXCL;			/* Stop rewrite attacks */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_nonrelative = true;
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	ispipe = format_corename(&cn, signr);

	if (ispipe) {
		int dump_count;
		char **helper_argv;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_corename;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIM_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
					NULL, &cprm);
		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_nonrelative && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		cprm.file = filp_open(cn.corename,
				 O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = cprm.file->f_path.dentry->d_inode;
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't allow local users get cute and trick others to
		 * coredump into their pre-created files.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	retval = binfmt->core_dump(&cprm);
	if (retval)
		current->signal->group_exit_code |= 0x80;

	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
fail_corename:
	coredump_finish(mm);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_write(struct file *file, const void *addr, int nr)
{
	return access_ok(VERIFY_READ, addr, nr) &&
		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);
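/*
 * Skip 'off' bytes in the core file. Seekable files get a cheap
 * SEEK_CUR, leaving a sparse hole; otherwise the gap is filled by
 * writing zeroed pages. Returns 1 on success, 0 on failure.
 */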
int dump_seek(struct file *file, loff_t off)
{
	int ret = 1;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				ret = 0;
				break;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}

	return ret;
}
EXPORT_SYMBOL(dump_seek);
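/*
 * Minimal usage sketch (illustrative only; example_core_dump and
 * fake_note are made-up names): a binfmt core_dump() implementation
 * emits data with dump_write() and skips untouched regions with
 * dump_seek(), keeping the core sparse where the filesystem allows:
 *
 *	static int example_core_dump(struct coredump_params *cprm)
 *	{
 *		struct fake_note note = { .len = 16 };
 *
 *		if (!dump_write(cprm->file, &note, sizeof(note)))
 *			return 0;	// write failed or truncated
 *		if (!dump_seek(cprm->file, PAGE_SIZE))
 *			return 0;	// could not pad the hole
 *		return 1;		// report "dumped"
 *	}
 */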