/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; there the wakeup is a harmless
 * no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
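/*
 * Editor's illustration, not part of the original file: with both SIGHUP
 * (1) and SIGSEGV (11) pending in the first word and nothing blocked,
 * next_signal() reports SIGSEGV first because it is in SYNCHRONOUS_MASK,
 * despite its higher number. The function below is hypothetical.
 */
static void __maybe_unused example_next_signal_priority(void)
{
	struct sigpending pending;
	sigset_t blocked;

	INIT_LIST_HEAD(&pending.list);
	sigemptyset(&pending.signal);
	sigemptyset(&blocked);

	sigaddset(&pending.signal, SIGHUP);
	sigaddset(&pending.signal, SIGSEGV);

	/* the synchronous SIGSEGV wins over the lower-numbered SIGHUP */
	WARN_ON(next_signal(&pending, &blocked) != SIGSEGV);
}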
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking; @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
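/*
 * Editor's sketch, not part of the original file: a kernel thread that
 * has opted in to SIGTERM via allow_signal() typically acknowledges a
 * wakeup by draining the queue with flush_signals(). The worker below
 * is hypothetical and assumes <linux/kthread.h>.
 */
static int __maybe_unused example_signal_aware_kthread(void *unused)
{
	allow_signal(SIGTERM);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		if (signal_pending(current))
			flush_signals(current);	/* consume and clear */
	}
	return 0;
}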
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon. If the notifier routine returns non-zero, then the
 * signal will be acted upon after all. If the notifier routine returns 0,
 * then the signal will be blocked. Only one block per process is
 * allowed. priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
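/*
 * Editor's sketch, not part of the original file: the shape of a
 * block_all_signals() user (historically the DRM lock code). All names
 * below are hypothetical; only the two interface calls are real.
 */
struct example_lock { int held; };

static int example_notifier(void *priv)
{
	struct example_lock *l = priv;

	return l->held ? 0 : 1;	/* 0 = keep blocking the signal */
}

static void __maybe_unused example_take_lock(struct example_lock *l,
					     sigset_t *mask)
{
	sigfillset(mask);
	sigdelset(mask, SIGKILL);	/* never swallow SIGKILL */
	l->held = 1;
	block_all_signals(example_notifier, l, mask);
}

static void __maybe_unused example_drop_lock(struct example_lock *l)
{
	l->held = 0;
	unblock_all_signals();
}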
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
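/*
 * Editor's sketch, not part of the original file: the caller-side
 * pattern. The dequeue_signal_lock() helper in <linux/sched.h> wraps
 * exactly this locking for kernel threads.
 */
static int __maybe_unused example_dequeue_one(void)
{
	siginfo_t info;
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, &info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;	/* 0 if nothing deliverable was pending */
}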
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (cred->user->user_ns == tcred->user->user_ns &&
	    (cred->euid == tcred->suid ||
	     cred->euid == tcred->uid ||
	     cred->uid  == tcred->suid ||
	     cred->uid  == tcred->uid))
		return 1;

	if (ns_capable(tcred->user->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event. @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken. If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should actually be delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
/*
 * map the uid in struct cred into user namespace *ns
 */
static inline uid_t map_cred_ns(const struct cred *cred,
				struct user_namespace *ns)
{
	return user_ns_map_uid(ns, cred, cred->uid);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	info->si_uid = user_ns_map_uid(task_cred_xxx(t, user_ns),
					current_cred(), info->si_uid);
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort. We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information. We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
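/*
 * Editor's sketch, not part of the original file: the classic caller is
 * an architecture page-fault handler delivering SIGSEGV with a precise
 * siginfo. The helper name is hypothetical; the calls are real.
 */
static void __maybe_unused example_deliver_segv(unsigned long address)
{
	siginfo_t info;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_MAPERR;		/* address not mapped */
	info.si_addr  = (void __user *)address;

	force_sig_info(SIGSEGV, &info, current);
}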
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (cred->user_ns != pcred->user_ns)
		return 0;
	if (cred->euid != pcred->suid && cred->euid != pcred->uid &&
	    cred->uid  != pcred->suid && cred->uid  != pcred->uid)
		return 0;
	return 1;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
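/*
 * Editor's sketch, not part of the original file: a driver nudging a
 * task it already holds a reference to. priv = 1 selects SEND_SIG_PRIV,
 * marking the signal as kernel-generated (si_code = SI_KERNEL).
 */
static void __maybe_unused example_kick_task(struct task_struct *task)
{
	send_sig(SIGURG, task, 1);
}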
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
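/*
 * Editor's sketch, not part of the original file: roughly what a tty
 * line discipline does for ^C - signal the foreground process group.
 * tty_get_pgrp() returns a referenced struct pid that must be dropped.
 */
static void __maybe_unused example_tty_intr(struct tty_struct *tty)
{
	struct pid *pgrp = tty_get_pgrp(tty);

	if (pgrp) {
		kill_pgrp(pgrp, SIGINT, 1);
		put_pid(pgrp);
	}
}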
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
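/*
 * Editor's sketch, not part of the original file: terminating a process
 * by numeric pid from kernel code. find_get_pid() resolves the number
 * in the current pid namespace and takes a reference.
 */
static int __maybe_unused example_kill_by_nr(pid_t nr)
{
	struct pid *pid = find_get_pid(nr);
	int ret = -ESRCH;

	if (pid) {
		ret = kill_pid(pid, SIGTERM, 1);
		put_pid(pid);
	}
	return ret;
}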
/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
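/*
 * Editor's sketch, not part of the original file: the preallocated
 * sigqueue lifecycle as the POSIX timer code uses it. The function and
 * flow are hypothetical; the three interface calls are real.
 */
static int __maybe_unused example_timer_sigqueue(struct task_struct *target)
{
	struct sigqueue *q;

	/* timer_create(): reserve the entry up front */
	q = sigqueue_alloc();
	if (!q)
		return -EAGAIN;	/* report the failure to the application */

	q->info.si_signo = SIGALRM;
	q->info.si_code = SI_TIMER;

	/* timer expiry: deliver without allocating, so nothing is lost */
	send_sigqueue(q, target, 0);	/* 0 = per-thread delivery */

	/* timer_delete(): release the entry (deferred if still queued) */
	sigqueue_free(q);
	return 0;
}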
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it could do is switch its nsproxy with
	 * sys_unshare(), but unsharing pid namespaces is not allowed,
	 * so we will always see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = map_cred_ns(__task_cred(tsk),
			task_cred_xxx(tsk->parent, user_ns));
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = map_cred_ns(__task_cred(tsk),
			task_cred_xxx(parent, user_ns));
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader. The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop. The states
		 * for the two don't interact with each other. Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here. During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run. */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
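/*
 * Editor's sketch, not part of the original file: ptrace_event() in
 * <linux/ptrace.h> reports events such as fork by packing the event
 * number into exit_code, roughly as below.
 */
static void __maybe_unused example_report_fork(unsigned long child_pid)
{
	if (unlikely(current->ptrace & PT_TRACE_FORK)) {
		current->ptrace_message = child_pid; /* PTRACE_GETEVENTMSG */
		ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
	}
}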
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it. If already set, participate in the existing
 * group stop. If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		for (t = next_thread(current); t != current;
		     t = next_thread(t)) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, the lower eight bits of exit_code
 * contain the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	ptrace_signal_deliver(regs, cookie);
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether the debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(); we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = map_cred_ns(__task_cred(current->parent),
				current_user_ns());
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(uprobe_deny_signal()))
		return 0;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing. This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, info,
					      regs, cookie);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler. */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock. */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
/**
 * block_sigmask - add @ka's signal mask to current->blocked
 * @ka: action for @signr
 * @signr: signal that has been successfully delivered
 *
 * This function should be called when a signal has successfully been
 * delivered. It adds the mask of signals for @ka to current->blocked
 * so that they are blocked during the execution of the signal
 * handler. In addition, @signr will be blocked unless %SA_NODEFER is
 * set in @ka->sa.sa_flags.
 */
void block_sigmask(struct k_sigaction *ka, int signr)
{
	sigset_t blocked;

	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, signr);
	set_current_blocked(&blocked);
}
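
/*
 * Illustrative sketch (not built here): an architecture's signal
 * delivery path is expected to call block_sigmask() once the signal
 * frame has been set up.  Assuming a per-arch setup_rt_frame() helper
 * (names and argument lists vary by architecture), the call site
 * typically looks like:
 *
 *	if (setup_rt_frame(sig, ka, info, oldset, regs) == 0) {
 *		block_sigmask(ka, sig);
 *		tracehook_signal_handler(sig, info, ka, regs, 0);
 *	}
 *
 * Treat this as a sketch of the intended calling convention only;
 * consult the arch code for the authoritative sequence.
 */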
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	set_current_blocked(&newset);
	return 0;
}
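
/*
 * Example (sketch): kernel threads usually manipulate their mask via
 * the allow_signal()/disallow_signal() helpers, but a direct
 * sigprocmask() call is legal too.  A kthread that wants to see
 * SIGTERM might do:
 *
 *	sigset_t set;
 *
 *	siginitset(&set, sigmask(SIGTERM));
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);
 *
 * Unlike sys_sigprocmask(), nothing here filters SIGKILL/SIGSTOP, so
 * callers must not blindly pass through user-supplied masks.
 */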
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: new set of blocked signals, or NULL to leave the mask unchanged
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
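
/*
 * Userspace view (illustrative only): glibc's sigprocmask() wrapper
 * ends up here.  Blocking SIGINT, for example:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	if (sigprocmask(SIG_BLOCK, &set, &old) < 0)
 *		perror("sigprocmask");
 *
 * Any SIGKILL/SIGSTOP bits in @nset are silently dropped by the
 * sigdelsetmask() call above rather than rejected with an error.
 */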
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @set: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
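
/*
 * Userspace view (illustrative only): sigpending() reports signals
 * raised while blocked.  After the sigprocmask() example above, a
 * pending SIGINT can be observed with:
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 &&
 *	    sigismember(&pending, SIGINT))
 *		puts("SIGINT is pending");
 */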
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			const struct timespec *ts)
{
	struct task_struct *tsk = current;
	long timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t mask = *which;
	int sig;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_jiffies(ts);
		/*
		 * We can be close to the next tick, add another one
		 * to ensure we will wait at least the time asked for.
		 */
		if (ts->tv_sec || ts->tv_nsec)
			timeout++;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready: temporarily unblock those we're interested
		 * in while we sleep, so that we'll be awakened when they
		 * arrive.  Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		timeout = schedule_timeout_interruptible(timeout);

		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		siginitset(&tsk->real_blocked, 0);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return timeout ? -EINTR : -EAGAIN;
}
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
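
/*
 * Userspace view (illustrative only): the signal must be blocked
 * before sigtimedwait() is called, otherwise it may be delivered to a
 * handler instead of being picked up synchronously:
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) < 0)
 *		perror("sigtimedwait");
 *
 * On timeout the call fails with EAGAIN, matching the -EAGAIN return
 * in do_sigtimedwait() above.
 */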
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}
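
/*
 * Userspace view (illustrative only): sig == 0 performs only the
 * existence and permission check documented in do_send_specific()
 * below; no signal is actually delivered:
 *
 *	if (kill(pid, 0) == 0)
 *		printf("pid %d exists and is signalable\n", pid);
 *	else if (errno == ESRCH)
 *		printf("pid %d does not exist\n", pid);
 */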
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group.  This
 *  solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
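
/*
 * Userspace view (illustrative only): there is no glibc wrapper for
 * tgkill in this era, so callers go through syscall(2):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 *
 * The kernel only sees the (tgid, tid, sig) triple; the tgid check
 * above guards against the tid having been recycled.
 */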
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
/**
 *  sys_rt_sigqueueinfo - queue a signal and accompanying info to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info.si_code >= 0 || info.si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info.si_code < 0);
		return -EPERM;
	}
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
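
/*
 * Userspace view (illustrative only): glibc's sigqueue() builds a
 * siginfo with si_code = SI_QUEUE, a negative value, so it passes the
 * si_code >= 0 check above and lands in sys_rt_sigqueueinfo():
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	if (sigqueue(pid, SIGUSR1, val) < 0)
 *		perror("sigqueue");
 *
 * A hand-rolled rt_sigqueueinfo() call with si_code >= 0, or with
 * SI_TKILL, fails with -EPERM, as enforced above.
 */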
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
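
/*
 * Userspace view (illustrative only): the POSIX discard semantics
 * quoted above can be observed by ignoring a signal that is already
 * pending:
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { .sa_handler = SIG_IGN };
 *
 *	sigemptyset(&sa.sa_mask);
 *	if (sigaction(SIGCHLD, &sa, NULL) < 0)
 *		perror("sigaction");
 *
 * Any queued SIGCHLD is dropped by the rm_from_queue_full() calls,
 * whether or not it was blocked.
 */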
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		int ss_flags;
		size_t ss_size;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
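
/*
 * Userspace view (illustrative only): an alternate stack is installed
 * with sigaltstack() and then selected per-handler via SA_ONSTACK:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp    = malloc(SIGSTKSZ),
 *		.ss_size  = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = { .sa_handler = segv_handler,
 *				.sa_flags   = SA_ONSTACK };
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * segv_handler is a hypothetical handler; sizes below MINSIGSTKSZ are
 * rejected with -ENOMEM, as above.
 */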
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
	set_current_blocked(&newset);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
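
/*
 * Userspace note (illustrative only): because SA_ONESHOT is set above,
 * a handler installed through this syscall is reset to SIG_DFL on
 * delivery, so classic System V code re-arms it from within the
 * handler:
 *
 *	void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);
 *	}
 *
 * The re-arm window is racy; portable code should prefer sigaction().
 */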
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	current->saved_sigmask = current->blocked;
	set_current_blocked(&newset);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
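
/*
 * Userspace view (illustrative only): sigsuspend() closes the window
 * between unblocking a signal and sleeping for it, which a plain
 * pause() leaves open:
 *
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)
 *		sigsuspend(&old);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * got_usr1 is a hypothetical volatile sig_atomic_t flag set by the
 * handler; saved_sigmask above restores the caller's mask on return.
 */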
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
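
/*
 * Usage note (sketch): this is the backend for kdb's "kill" command.
 * From the kdb prompt, something like:
 *
 *	kdb> kill -9 1234
 *
 * builds a siginfo for SIGKILL and reaches kdb_send_sig_info() for
 * pid 1234.  The exact command syntax belongs to the kdb frontend;
 * see the kdb documentation rather than relying on this sketch.
 */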
#endif	/* CONFIG_KGDB_KDB */