/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}
static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
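/*
 * Illustrative sketch (not part of the kernel source): the same
 * word-by-word "pending & ~blocked" test as a self-contained userspace
 * program, with a hypothetical two-word array standing in for sigset_t.
 */
#if 0 /* example only; build as an ordinary C program */
#include <stdio.h>

#define WORDS 2	/* two 64-signal words, mirroring sigset_t on 64-bit */

/* Same test the kernel performs: is any pending signal not blocked? */
static int demo_has_pending(const unsigned long *pending,
			    const unsigned long *blocked)
{
	unsigned long ready = 0;
	int i;

	for (i = 0; i < WORDS; i++)
		ready |= pending[i] & ~blocked[i];
	return ready != 0;
}

int main(void)
{
	unsigned long pending[WORDS] = { 1UL << (2 - 1), 0 };	/* SIGINT */
	unsigned long blocked[WORDS] = { 1UL << (2 - 1), 0 };	/* SIGINT */

	printf("%d\n", demo_has_pending(pending, blocked));	/* 0: blocked */
	blocked[0] = 0;
	printf("%d\n", demo_has_pending(pending, blocked));	/* 1: deliverable */
	return 0;
}
#endif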
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
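/*
 * Illustrative sketch (not part of the kernel source): why a pending
 * SIGSEGV is serviced before a lower-numbered SIGHUP. SYNC_MASK below is a
 * hypothetical userspace mirror of SYNCHRONOUS_MASK; __builtin_ctzl is a
 * GCC builtin standing in for the kernel's ffz(~x).
 */
#if 0 /* example only; build with gcc as an ordinary C program */
#include <signal.h>
#include <stdio.h>

#define SYNC_MASK ((1UL << (SIGSEGV - 1)) | (1UL << (SIGBUS - 1)) | \
		   (1UL << (SIGILL - 1))  | (1UL << (SIGTRAP - 1)) | \
		   (1UL << (SIGFPE - 1))  | (1UL << (SIGSYS - 1)))

int main(void)
{
	/* SIGHUP (1) and SIGSEGV (11) both pending, nothing blocked. */
	unsigned long x = (1UL << (SIGHUP - 1)) | (1UL << (SIGSEGV - 1));

	/* The first word is narrowed to synchronous signals first ... */
	if (x & SYNC_MASK)
		x &= SYNC_MASK;
	/* ... then the lowest set bit wins: SIGSEGV, despite SIGHUP < SIGSEGV. */
	printf("first serviced: %d\n", __builtin_ctzl(x) + 1);
	return 0;
}
#endif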
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
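/*
 * Illustrative sketch (not part of the kernel source): hitting
 * RLIMIT_SIGPENDING from userspace by queuing blocked real-time signals
 * until sigqueue() fails with EAGAIN. Standard POSIX APIs only; assumes
 * the per-user limit is finite on the test system.
 */
#if 0 /* example only; build as an ordinary C program */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	union sigval v = { .sival_int = 0 };
	int n = 0;

	/* Block SIGRTMIN so queued instances pile up against the rlimit. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	while (sigqueue(getpid(), SIGRTMIN, v) == 0)
		n++;
	if (errno == EAGAIN)
		printf("queued %d signals before hitting RLIMIT_SIGPENDING\n", n);
	return 0;
}
#endif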
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
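/*
 * Illustrative sketch (not part of the kernel source): the userspace view
 * of the itimer restart described above. A periodic ITIMER_REAL keeps
 * re-arming as each SIGALRM is dequeued, and at most one SIGALRM is ever
 * pending. Standard POSIX APIs only.
 */
#if 0 /* example only; build as an ordinary C program */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alrm(int sig) { ticks++; }

int main(void)
{
	struct sigaction sa = { .sa_handler = on_alrm };
	struct itimerval it = {
		.it_interval = { .tv_sec = 0, .tv_usec = 100000 },
		.it_value    = { .tv_sec = 0, .tv_usec = 100000 },
	};

	sigaction(SIGALRM, &sa, NULL);
	setitimer(ITIMER_REAL, &it, NULL);

	/* The kernel re-arms the periodic timer on dequeue; even if
	 * delivery is delayed, SIGALRM never queues more than once. */
	while (ticks < 5)
		pause();
	printf("got %d ticks\n", (int)ticks);
	return 0;
}
#endif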
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
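/*
 * Illustrative sketch (not part of the kernel source): the stop/continue
 * interplay handled by prepare_signal() as seen from userspace. A SIGSTOP
 * stops the child; a later SIGCONT removes pending stop signals and wakes
 * it. Standard POSIX APIs only.
 */
#if 0 /* example only; build as an ordinary C program */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		pause();			/* child waits for signals */
		_exit(0);
	}
	kill(pid, SIGSTOP);			/* stop the child ... */
	waitpid(pid, &status, WUNTRACED);
	printf("stopped: %d\n", WIFSTOPPED(status));
	kill(pid, SIGCONT);			/* ... SIGCONT discards pending
						 * stop signals and continues it */
	waitpid(pid, &status, WCONTINUED);
	printf("continued: %d\n", WIFCONTINUED(status));
	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
#endif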
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
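/*
 * Illustrative sketch (not part of the kernel source): the legacy-queue
 * rule from userspace. Two blocked instances of a classic signal collapse
 * into one pending bit, while each blocked real-time signal instance gets
 * its own sigqueue entry. Standard POSIX APIs only.
 */
#if 0 /* example only; build as an ordinary C program */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set, pending;
	union sigval v = { .sival_int = 0 };
	int n_usr1 = 0, n_rt = 0, sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	kill(getpid(), SIGUSR1);	/* legacy: queued at most once */
	kill(getpid(), SIGUSR1);
	sigqueue(getpid(), SIGRTMIN, v);	/* real-time: queued twice */
	sigqueue(getpid(), SIGRTMIN, v);

	sigpending(&pending);
	while (sigismember(&pending, SIGUSR1) ||
	       sigismember(&pending, SIGRTMIN)) {
		sigwait(&set, &sig);
		if (sig == SIGUSR1)
			n_usr1++;
		else
			n_rt++;
		sigpending(&pending);
	}
	printf("SIGUSR1 delivered %d time(s), SIGRTMIN %d time(s)\n",
	       n_usr1, n_rt);	/* prints 1 and 2 */
	return 0;
}
#endif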
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
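/*
 * Illustrative sketch (not part of the kernel source): the userspace side
 * of the preallocation described above. timer_create() with SIGEV_SIGNAL
 * preallocates the sigqueue entry, so an allocation failure surfaces as
 * EAGAIN at creation time rather than as a lost expiration signal later.
 * Standard POSIX APIs only.
 */
#if 0 /* example only; link with -lrt on older glibc */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t timerid;
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,
	};

	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
		perror("timer_create");	/* EAGAIN if preallocation failed */
		return 1;
	}
	printf("timer created; expiration signals cannot be dropped\n");
	timer_delete(timerid);
	return 0;
}
#endif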
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
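/*
 * Illustrative sketch (not part of the kernel source): the autoreap
 * semantics above as seen from userspace. With SA_NOCLDWAIT set on
 * SIGCHLD, an exiting child is reaped automatically and wait() fails
 * with ECHILD. The sleep() is a crude synchronization for the demo.
 */
#if 0 /* example only; build as an ordinary C program */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct sigaction sa = { .sa_handler = SIG_DFL,
				.sa_flags = SA_NOCLDWAIT };

	sigaction(SIGCHLD, &sa, NULL);

	if (fork() == 0)
		_exit(0);
	sleep(1);	/* let the child exit and be autoreaped */
	if (wait(NULL) == -1 && errno == ECHILD)
		printf("child was autoreaped, nothing to wait for\n");
	return 0;
}
#endif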
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		for (t = next_thread(current); t != current;
		     t = next_thread(t)) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
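/*
 * Illustrative sketch (not part of the kernel source): a tracer using the
 * path above. The tracee's SIGUSR1 is intercepted as a signal-delivery
 * stop; resuming with sig == 0 cancels it, so the tracee never sees the
 * signal. Linux-specific ptrace(2) plus standard POSIX APIs.
 */
#if 0 /* example only; build as an ordinary C program */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);		/* trapped by the tracer */
		_exit(42);		/* reached only if the signal is cancelled */
	}
	waitpid(pid, &status, 0);	/* tracee stopped with SIGUSR1 */
	printf("intercepted signal %d\n", WSTOPSIG(status));

	ptrace(PTRACE_CONT, pid, NULL, 0);	/* sig 0: cancel the signal */
	waitpid(pid, &status, 0);
	printf("tracee exited with %d\n", WEXITSTATUS(status));
	return 0;
}
#endif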
2189 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2190 struct pt_regs *regs, void *cookie)
2192 struct sighand_struct *sighand = current->sighand;
2193 struct signal_struct *signal = current->signal;
2196 if (unlikely(current->task_works))
2199 if (unlikely(uprobe_deny_signal()))
2203 * Do this once, we can't return to user-mode if freezing() == T.
2204 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2205 * thus do not need another check after return.
2210 spin_lock_irq(&sighand->siglock);
2212 * Every stopped thread goes here after wakeup. Check to see if
2213 * we should notify the parent, prepare_signal(SIGCONT) encodes
2214 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2216 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2219 if (signal->flags & SIGNAL_CLD_CONTINUED)
2220 why = CLD_CONTINUED;
2224 signal->flags &= ~SIGNAL_CLD_MASK;
2226 spin_unlock_irq(&sighand->siglock);
2229 * Notify the parent that we're continuing. This event is
2230 * always per-process and doesn't make whole lot of sense
2231 * for ptracers, who shouldn't consume the state via
2232 * wait(2) either, but, for backward compatibility, notify
2233 * the ptracer of the group leader too unless it's gonna be
2236 read_lock(&tasklist_lock);
2237 do_notify_parent_cldstop(current, false, why);
2239 if (ptrace_reparented(current->group_leader))
2240 do_notify_parent_cldstop(current->group_leader,
2242 read_unlock(&tasklist_lock);
2248 struct k_sigaction *ka;
2250 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2254 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2256 spin_unlock_irq(&sighand->siglock);
2260 signr = dequeue_signal(current, ¤t->blocked, info);
2263 break; /* will return 0 */
2265 if (unlikely(current->ptrace) && signr != SIGKILL) {
2266 signr = ptrace_signal(signr, info);
2271 ka = &sighand->action[signr-1];
2273 /* Trace actually delivered signals. */
2274 trace_signal_deliver(signr, info, ka);
2276 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2278 if (ka->sa.sa_handler != SIG_DFL) {
2279 /* Run the handler. */
2282 if (ka->sa.sa_flags & SA_ONESHOT)
2283 ka->sa.sa_handler = SIG_DFL;
2285 break; /* will return non-zero "signr" value */
2289 * Now we are doing the default action for this signal.
2291 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2295 * Global init gets no signals it doesn't want.
2296 * Container-init gets no signals it doesn't want from same
2299 * Note that if global/container-init sees a sig_kernel_only()
2300 * signal here, the signal must have been generated internally
2301 * or must have come from an ancestor namespace. In either
2302 * case, the signal cannot be dropped.
2304 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2305 !sig_kernel_only(signr))
2308 if (sig_kernel_stop(signr)) {
2310 * The default action is to stop all threads in
2311 * the thread group. The job control signals
2312 * do nothing in an orphaned pgrp, but SIGSTOP
2313 * always works. Note that siglock needs to be
2314 * dropped during the call to is_orphaned_pgrp()
2315 * because of lock ordering with tasklist_lock.
2316 * This allows an intervening SIGCONT to be posted.
2317 * We need to check for that and bail out if necessary.
2319 if (signr != SIGSTOP) {
2320 spin_unlock_irq(&sighand->siglock);
2322 /* signals can be posted during this window */
2324 if (is_current_pgrp_orphaned())
2327 spin_lock_irq(&sighand->siglock);
2330 if (likely(do_signal_stop(info->si_signo))) {
2331 /* It released the siglock. */
2336 * We didn't actually stop, due to a race
2337 * with SIGCONT or something like that.
2342 spin_unlock_irq(&sighand->siglock);
2345 * Anything else is fatal, maybe with a core dump.
2347 current->flags |= PF_SIGNALED;
2349 if (sig_kernel_coredump(signr)) {
2350 if (print_fatal_signals)
2351 print_fatal_signal(info->si_signo);
2353 * If it was able to dump core, this kills all
2354 * other threads in the group and synchronizes with
2355 * their demise. If we lost the race with another
2356 * thread getting here, it set group_exit_code
2357 * first and our do_group_exit call below will use
2358 * that value and ignore the one we pass it.
2364 * Death signals, no core dump.
2366 do_group_exit(info->si_signo);
2369 spin_unlock_irq(&sighand->siglock);
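/*
 * Editorial example, not part of signal.c: the SA_ONESHOT branch above
 * (exposed to userspace as SA_RESETHAND) resets the disposition to
 * SIG_DFL before the handler runs, so a second signal takes the
 * default action.  A minimal sketch using sigaction(2).
 */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

static void once(int sig)
{
	/* write(2) is async-signal-safe; printf(3) is not */
	write(1, "handler ran once\n", 17);
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = once;
	sa.sa_flags = SA_RESETHAND;	/* kernel-side: SA_ONESHOT */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);			/* runs once(), resets to SIG_DFL */

	struct sigaction cur;
	sigaction(SIGUSR1, NULL, &cur);
	printf("disposition is now %s\n",
	       cur.sa_handler == SIG_DFL ? "SIG_DFL" : "custom");
	return 0;
}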
2374 * signal_delivered -
2375 * @sig: number of signal being delivered
2376 * @info: siginfo_t of signal being delivered
2377 * @ka: sigaction setting that chose the handler
2378 * @regs: user register state
2379 * @stepping: nonzero if debugger single-step or block-step in use
2381 * This function should be called when a signal has successfully been
2382 * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
2383 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2384 * is set in @ka->sa.sa_flags). Tracing is notified.
2386 void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
2387 struct pt_regs *regs, int stepping)
2391 /* A signal was successfully delivered, and the
2392 saved sigmask was stored on the signal frame,
2393 and will be restored by sigreturn. So we can
2394 simply clear the restore sigmask flag. */
2395 clear_restore_sigmask();
2397 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
2398 if (!(ka->sa.sa_flags & SA_NODEFER))
2399 sigaddset(&blocked, sig);
2400 set_current_blocked(&blocked);
2401 tracehook_signal_handler(sig, info, ka, regs, stepping);
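/*
 * Editorial example, not part of signal.c: signal_delivered() above
 * blocks sa_mask plus the delivered signal itself unless SA_NODEFER is
 * set.  This sketch inspects the mask from inside the handler to show
 * the difference; it assumes only sigaction(2) and sigprocmask(2).
 */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

static void probe(int sig)
{
	sigset_t cur;
	sigprocmask(SIG_BLOCK, NULL, &cur);	/* query only */
	const char *msg = sigismember(&cur, sig)
		? "SIGUSR1 blocked inside handler\n"
		: "SIGUSR1 NOT blocked inside handler\n";
	write(1, msg, strlen(msg));
}

static void run(int flags)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = probe;
	sa.sa_flags = flags;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
}

int main(void)
{
	run(0);			/* default: signal auto-blocked */
	run(SA_NODEFER);	/* mask left alone */
	return 0;
}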
2404 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2407 force_sigsegv(ksig->sig, current);
2409 signal_delivered(ksig->sig, &ksig->info, &ksig->ka,
2410 signal_pt_regs(), stepping);
2414 * It could be that complete_signal() picked us to notify about the
2415 * group-wide signal. Other threads should be notified now to take
2416 * the shared signals in @which since we will not.
2418 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2421 struct task_struct *t;
2423 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2424 if (sigisemptyset(&retarget))
2428 while_each_thread(tsk, t) {
2429 if (t->flags & PF_EXITING)
2432 if (!has_pending_signals(&retarget, &t->blocked))
2434 /* Remove the signals this thread can handle. */
2435 sigandsets(&retarget, &retarget, &t->blocked);
2437 if (!signal_pending(t))
2438 signal_wake_up(t, 0);
2440 if (sigisemptyset(&retarget))
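/*
 * Editorial example, not part of signal.c: the retargeting above is
 * one half of the rule that a process-directed signal may be handled
 * by any thread that has it unblocked.  Sketch: the main thread blocks
 * SIGUSR1, a worker leaves it open, and kill(2) aimed at the process
 * lands in the worker.  Assumes POSIX threads; compile with -pthread.
 */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <pthread.h>

static void handler(int sig)
{
	write(1, "worker thread took the shared signal\n", 37);
}

static void *worker(void *arg)
{
	sigset_t open_set;
	sigemptyset(&open_set);
	sigaddset(&open_set, SIGUSR1);
	pthread_sigmask(SIG_UNBLOCK, &open_set, NULL);
	pause();			/* wait for delivery */
	return NULL;
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sigaction(SIGUSR1, &sa, NULL);

	/* Block SIGUSR1 here; the worker inherits and then unblocks it. */
	sigset_t blocked;
	sigemptyset(&blocked);
	sigaddset(&blocked, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &blocked, NULL);

	pthread_t tid;
	pthread_create(&tid, NULL, worker, NULL);
	sleep(1);			/* crude: let the worker unblock */
	kill(getpid(), SIGUSR1);	/* process-directed */
	pthread_join(tid, NULL);
	return 0;
}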
2445 void exit_signals(struct task_struct *tsk)
2451 * @tsk is about to have PF_EXITING set - lock out users which
2452 * expect stable threadgroup.
2454 threadgroup_change_begin(tsk);
2456 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2457 tsk->flags |= PF_EXITING;
2458 threadgroup_change_end(tsk);
2462 spin_lock_irq(&tsk->sighand->siglock);
2464 * From now this task is not visible for group-wide signals,
2465 * see wants_signal(), do_signal_stop().
2467 tsk->flags |= PF_EXITING;
2469 threadgroup_change_end(tsk);
2471 if (!signal_pending(tsk))
2474 unblocked = tsk->blocked;
2475 signotset(&unblocked);
2476 retarget_shared_pending(tsk, &unblocked);
2478 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2479 task_participate_group_stop(tsk))
2480 group_stop = CLD_STOPPED;
2482 spin_unlock_irq(&tsk->sighand->siglock);
2485 * If group stop has completed, deliver the notification. This
2486 * should always go to the real parent of the group leader.
2488 if (unlikely(group_stop)) {
2489 read_lock(&tasklist_lock);
2490 do_notify_parent_cldstop(tsk, false, group_stop);
2491 read_unlock(&tasklist_lock);
2495 EXPORT_SYMBOL(recalc_sigpending);
2496 EXPORT_SYMBOL_GPL(dequeue_signal);
2497 EXPORT_SYMBOL(flush_signals);
2498 EXPORT_SYMBOL(force_sig);
2499 EXPORT_SYMBOL(send_sig);
2500 EXPORT_SYMBOL(send_sig_info);
2501 EXPORT_SYMBOL(sigprocmask);
2502 EXPORT_SYMBOL(block_all_signals);
2503 EXPORT_SYMBOL(unblock_all_signals);
2507 * System call entry points.
2511 * sys_restart_syscall - restart a system call
2513 SYSCALL_DEFINE0(restart_syscall)
2515 struct restart_block *restart = &current_thread_info()->restart_block;
2516 return restart->fn(restart);
2519 long do_no_restart_syscall(struct restart_block *param)
2524 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2526 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2527 sigset_t newblocked;
2528 /* A set of now blocked but previously unblocked signals. */
2529 sigandnsets(&newblocked, newset, &current->blocked);
2530 retarget_shared_pending(tsk, &newblocked);
2532 tsk->blocked = *newset;
2533 recalc_sigpending();
2537 * set_current_blocked - change current->blocked mask
2540 * It is wrong to change ->blocked directly, this helper should be used
2541 * to ensure the process can't miss a shared signal we are going to block.
2543 void set_current_blocked(sigset_t *newset)
2545 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2546 __set_current_blocked(newset);
2549 void __set_current_blocked(const sigset_t *newset)
2551 struct task_struct *tsk = current;
2553 spin_lock_irq(&tsk->sighand->siglock);
2554 __set_task_blocked(tsk, newset);
2555 spin_unlock_irq(&tsk->sighand->siglock);
2559 * This is also useful for kernel threads that want to temporarily
2560 * (or permanently) block certain signals.
2562 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2563 * interface happily blocks "unblockable" signals like SIGKILL
2566 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2568 struct task_struct *tsk = current;
2571 /* Lockless, only current can change ->blocked, never from irq */
2573 *oldset = tsk->blocked;
2577 sigorsets(&newset, &tsk->blocked, set);
2580 sigandnsets(&newset, &tsk->blocked, set);
2589 __set_current_blocked(&newset);
2594 * sys_rt_sigprocmask - change the list of currently blocked signals
2595 * @how: whether to add, remove, or set signals
2596 * @nset: new signal mask to apply, if non-null
2597 * @oset: previous value of signal mask if non-null
2598 * @sigsetsize: size of sigset_t type
2600 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2601 sigset_t __user *, oset, size_t, sigsetsize)
2603 sigset_t old_set, new_set;
2606 /* XXX: Don't preclude handling different sized sigset_t's. */
2607 if (sigsetsize != sizeof(sigset_t))
2610 old_set = current->blocked;
2613 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2615 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2617 error = sigprocmask(how, &new_set, NULL);
2623 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
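/*
 * Editorial example, not part of signal.c: driving the syscall above
 * through the libc sigprocmask(2) wrapper.  SIGINT is blocked, raised
 * (so it sits in the pending set), then unblocked, at which point it
 * is delivered.  A sketch with error handling omitted.
 */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

static void on_int(int sig)
{
	write(1, "SIGINT delivered after unblock\n", 31);
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_int;
	sigaction(SIGINT, &sa, NULL);

	sigset_t set, old;
	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	sigprocmask(SIG_BLOCK, &set, &old);	/* add to blocked mask */
	raise(SIGINT);				/* pending, not delivered */
	printf("SIGINT raised while blocked\n");
	sigprocmask(SIG_SETMASK, &old, NULL);	/* delivery happens here */
	return 0;
}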
2630 #ifdef CONFIG_COMPAT
2631 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2632 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2635 sigset_t old_set = current->blocked;
2637 /* XXX: Don't preclude handling different sized sigset_t's. */
2638 if (sigsetsize != sizeof(sigset_t))
2642 compat_sigset_t new32;
2645 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2648 sigset_from_compat(&new_set, &new32);
2649 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2651 error = sigprocmask(how, &new_set, NULL);
2656 compat_sigset_t old32;
2657 sigset_to_compat(&old32, &old_set);
2658 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2663 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2664 (sigset_t __user *)oset, sigsetsize);
2669 static int do_sigpending(void *set, unsigned long sigsetsize)
2671 if (sigsetsize > sizeof(sigset_t))
2674 spin_lock_irq(&current->sighand->siglock);
2675 sigorsets(set, &current->pending.signal,
2676 &current->signal->shared_pending.signal);
2677 spin_unlock_irq(&current->sighand->siglock);
2679 /* Outside the lock because only this thread touches it. */
2680 sigandsets(set, &current->blocked, set);
2685 * sys_rt_sigpending - examine a pending signal that has been raised
2687 * @uset: stores pending signals
2688 * @sigsetsize: size of sigset_t type or smaller
2690 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2693 int err = do_sigpending(&set, sigsetsize);
2694 if (!err && copy_to_user(uset, &set, sigsetsize))
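/*
 * Editorial example, not part of signal.c: observing the set computed
 * by do_sigpending() above through sigpending(2).  A raised-but-blocked
 * signal shows up as pending until it is delivered or discarded.
 * Minimal sketch; error handling omitted.
 */
#include <stdio.h>
#include <signal.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* sits in the pending set */

	sigpending(&pending);
	printf("SIGUSR1 pending: %s\n",
	       sigismember(&pending, SIGUSR1) ? "yes" : "no");
	return 0;
}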
2699 #ifdef CONFIG_COMPAT
2700 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2701 compat_size_t, sigsetsize)
2705 int err = do_sigpending(&set, sigsetsize);
2707 compat_sigset_t set32;
2708 sigset_to_compat(&set32, &set);
2709 /* we can get here only if sigsetsize <= sizeof(set) */
2710 if (copy_to_user(uset, &set32, sigsetsize))
2715 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2720 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2722 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2726 if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2728 if (from->si_code < 0)
2729 return __copy_to_user(to, from, sizeof(siginfo_t))
2730 ? -EFAULT : 0;
2732 * If you change the siginfo_t structure, please be sure
2733 * this code is fixed accordingly.
2734 * Please remember to update the signalfd_copyinfo() function
2735 * inside fs/signalfd.c too, in case siginfo_t changes.
2736 * It should never copy any pad contained in the structure
2737 * to avoid security leaks, but must copy the generic
2738 * 3 ints plus the relevant union member.
2740 err = __put_user(from->si_signo, &to->si_signo);
2741 err |= __put_user(from->si_errno, &to->si_errno);
2742 err |= __put_user((short)from->si_code, &to->si_code);
2743 switch (from->si_code & __SI_MASK) {
2745 err |= __put_user(from->si_pid, &to->si_pid);
2746 err |= __put_user(from->si_uid, &to->si_uid);
2749 err |= __put_user(from->si_tid, &to->si_tid);
2750 err |= __put_user(from->si_overrun, &to->si_overrun);
2751 err |= __put_user(from->si_ptr, &to->si_ptr);
2754 err |= __put_user(from->si_band, &to->si_band);
2755 err |= __put_user(from->si_fd, &to->si_fd);
2758 err |= __put_user(from->si_addr, &to->si_addr);
2759 #ifdef __ARCH_SI_TRAPNO
2760 err |= __put_user(from->si_trapno, &to->si_trapno);
2762 #ifdef BUS_MCEERR_AO
2764 * Other callers might not initialize the si_lsb field,
2765 * so check explicitly for the right codes here.
2767 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2768 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2772 err |= __put_user(from->si_pid, &to->si_pid);
2773 err |= __put_user(from->si_uid, &to->si_uid);
2774 err |= __put_user(from->si_status, &to->si_status);
2775 err |= __put_user(from->si_utime, &to->si_utime);
2776 err |= __put_user(from->si_stime, &to->si_stime);
2778 case __SI_RT: /* This is not generated by the kernel as of now. */
2779 case __SI_MESGQ: /* But this is */
2780 err |= __put_user(from->si_pid, &to->si_pid);
2781 err |= __put_user(from->si_uid, &to->si_uid);
2782 err |= __put_user(from->si_ptr, &to->si_ptr);
2784 #ifdef __ARCH_SIGSYS
2786 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2787 err |= __put_user(from->si_syscall, &to->si_syscall);
2788 err |= __put_user(from->si_arch, &to->si_arch);
2791 default: /* this is just in case for now ... */
2792 err |= __put_user(from->si_pid, &to->si_pid);
2793 err |= __put_user(from->si_uid, &to->si_uid);
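/*
 * Editorial example, not part of signal.c: the union members copied
 * out by copy_siginfo_to_user() above are what an SA_SIGINFO handler
 * receives.  For a kill(2)-generated signal, si_code is SI_USER and
 * si_pid/si_uid identify the sender.  Minimal sketch.
 */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

static void info_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* printf(3) is not async-signal-safe; fine for a demo only */
	printf("sig=%d si_code=%d si_pid=%d si_uid=%d\n",
	       sig, info->si_code, (int)info->si_pid, (int)info->si_uid);
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = info_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	kill(getpid(), SIGUSR1);	/* si_code == SI_USER */
	return 0;
}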
2802 * do_sigtimedwait - wait for queued signals specified in @which
2803 * @which: queued signals to wait for
2804 * @info: if non-null, the signal's siginfo is returned here
2805 * @ts: upper bound on process time suspension
2807 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2808 const struct timespec *ts)
2810 struct task_struct *tsk = current;
2811 long timeout = MAX_SCHEDULE_TIMEOUT;
2812 sigset_t mask = *which;
2816 if (!timespec_valid(ts))
2818 timeout = timespec_to_jiffies(ts);
2820 * We can be close to the next tick; add another one
2821 * to ensure we will wait at least the time asked for.
2823 if (ts->tv_sec || ts->tv_nsec)
2828 * Invert the set of allowed signals to get those we want to block.
2830 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2833 spin_lock_irq(&tsk->sighand->siglock);
2834 sig = dequeue_signal(tsk, &mask, info);
2835 if (!sig && timeout) {
2837 * None ready; temporarily unblock the signals we're interested
2838 * in while we sleep, so that we'll be awakened when
2839 * they arrive. Unblocking is always fine; we can avoid
2840 * set_current_blocked().
2842 tsk->real_blocked = tsk->blocked;
2843 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2844 recalc_sigpending();
2845 spin_unlock_irq(&tsk->sighand->siglock);
2847 timeout = schedule_timeout_interruptible(timeout);
2849 spin_lock_irq(&tsk->sighand->siglock);
2850 __set_task_blocked(tsk, &tsk->real_blocked);
2851 siginitset(&tsk->real_blocked, 0);
2852 sig = dequeue_signal(tsk, &mask, info);
2854 spin_unlock_irq(&tsk->sighand->siglock);
2858 return timeout ? -EINTR : -EAGAIN;
2862 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2864 * @uthese: queued signals to wait for
2865 * @uinfo: if non-null, the signal's siginfo is returned here
2866 * @uts: upper bound on process time suspension
2867 * @sigsetsize: size of sigset_t type
2869 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2870 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2878 /* XXX: Don't preclude handling different sized sigset_t's. */
2879 if (sigsetsize != sizeof(sigset_t))
2882 if (copy_from_user(&these, uthese, sizeof(these)))
2886 if (copy_from_user(&ts, uts, sizeof(ts)))
2890 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2892 if (ret > 0 && uinfo) {
2893 if (copy_siginfo_to_user(uinfo, &info))
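/*
 * Editorial example, not part of signal.c: synchronous signal handling
 * with sigtimedwait(2), the userspace face of do_sigtimedwait() above.
 * The signal must be blocked first, otherwise it would be delivered
 * asynchronously instead of being picked up by the wait.
 */
#include <stdio.h>
#include <signal.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);		/* already pending when we wait */

	siginfo_t info;
	struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };
	int sig = sigtimedwait(&set, &info, &timeout);
	if (sig > 0)
		printf("dequeued signal %d from pid %d\n",
		       sig, (int)info.si_pid);
	else
		perror("sigtimedwait");	/* EAGAIN on timeout */
	return 0;
}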
2901 * sys_kill - send a signal to a process
2902 * @pid: the PID of the process
2903 * @sig: signal to be sent
2905 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2907 struct siginfo info;
2909 info.si_signo = sig;
2911 info.si_code = SI_USER;
2912 info.si_pid = task_tgid_vnr(current);
2913 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2915 return kill_something_info(sig, &info, pid);
2919 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2921 struct task_struct *p;
2925 p = find_task_by_vpid(pid);
2926 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2927 error = check_kill_permission(sig, info, p);
2929 * The null signal is a permissions and process existence
2930 * probe. No signal is actually delivered.
2932 if (!error && sig) {
2933 error = do_send_sig_info(sig, info, p, false);
2935 * If lock_task_sighand() failed we pretend the task
2936 * dies after receiving the signal. The window is tiny,
2937 * and the signal is private anyway.
2939 if (unlikely(error == -ESRCH))
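/*
 * Editorial example, not part of signal.c: using the null signal
 * described above as an existence/permission probe.  kill(pid, 0)
 * performs the permission check and PID lookup but delivers nothing.
 * can_signal() is a hypothetical helper name for this sketch.
 */
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

/* Hypothetical helper: returns 1 if we may signal pid, 0 otherwise. */
static int can_signal(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;
	if (errno == EPERM)
		return 0;	/* exists, but not ours to signal */
	return 0;		/* ESRCH: no such process */
}

int main(void)
{
	printf("pid 1: %s\n",
	       can_signal(1) ? "signalable" : "not signalable");
	return 0;
}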
2948 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2950 struct siginfo info = {};
2952 info.si_signo = sig;
2954 info.si_code = SI_TKILL;
2955 info.si_pid = task_tgid_vnr(current);
2956 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2958 return do_send_specific(tgid, pid, sig, &info);
2962 * sys_tgkill - send signal to one specific thread
2963 * @tgid: the thread group ID of the thread
2964 * @pid: the PID of the thread
2965 * @sig: signal to be sent
2967 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2968 * exists but no longer belongs to the target process. This
2969 * method solves the problem of threads exiting and PIDs getting reused.
2971 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2973 /* This is only valid for single tasks */
2974 if (pid <= 0 || tgid <= 0)
2977 return do_tkill(tgid, pid, sig);
2981 * sys_tkill - send signal to one specific task
2982 * @pid: the PID of the task
2983 * @sig: signal to be sent
2985 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2987 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2989 /* This is only valid for single tasks */
2993 return do_tkill(0, pid, sig);
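/*
 * Editorial example, not part of signal.c: directing a signal at one
 * thread with tgkill, matching the tgid check documented above.  Older
 * libcs expose it only via syscall(2); compile with -pthread.
 */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/syscall.h>

static pid_t worker_tid;

static void handler(int sig)
{
	write(1, "signal hit the worker thread\n", 29);
}

static void *worker(void *arg)
{
	worker_tid = syscall(SYS_gettid);	/* kernel thread id */
	pause();				/* wait for the signal */
	return NULL;
}

int main(void)
{
	signal(SIGUSR1, handler);

	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	sleep(1);				/* crude: let tid publish */

	/* the tgid argument guards against tid reuse across processes */
	syscall(SYS_tgkill, getpid(), worker_tid, SIGUSR1);
	pthread_join(t, NULL);
	return 0;
}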
2996 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2998 /* Not even root can pretend to send signals from the kernel.
2999 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3001 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3002 (task_pid_vnr(current) != pid)) {
3003 /* We used to allow any < 0 si_code */
3004 WARN_ON_ONCE(info->si_code < 0);
3007 info->si_signo = sig;
3009 /* POSIX.1b doesn't mention process groups. */
3010 return kill_proc_info(sig, info, pid);
3014 * sys_rt_sigqueueinfo - send signal information to a process
3015 * @pid: the PID of the process
3016 * @sig: signal to be sent
3017 * @uinfo: signal info to be sent
3019 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3020 siginfo_t __user *, uinfo)
3023 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3025 return do_rt_sigqueueinfo(pid, sig, &info);
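/*
 * Editorial example, not part of signal.c: rt_sigqueueinfo is normally
 * reached through sigqueue(3), which fills in a legitimate siginfo
 * (si_code = SI_QUEUE) and attaches a user value, steering clear of
 * the spoofing check above.  Sender and receiver in one process.
 */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>

static void take(int sig, siginfo_t *info, void *uc)
{
	printf("si_code=%d value=%d\n",
	       info->si_code, info->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = take;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	union sigval v = { .sival_int = 42 };
	sigqueue(getpid(), SIGUSR1, v);	/* queues with SI_QUEUE */
	return 0;
}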
3028 #ifdef CONFIG_COMPAT
3029 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3032 struct compat_siginfo __user *, uinfo)
3035 int ret = copy_siginfo_from_user32(&info, uinfo);
3038 return do_rt_sigqueueinfo(pid, sig, &info);
3042 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3044 /* This is only valid for single tasks */
3045 if (pid <= 0 || tgid <= 0)
3048 /* Not even root can pretend to send signals from the kernel.
3049 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3051 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3052 (task_pid_vnr(current) != pid)) {
3053 /* We used to allow any < 0 si_code */
3054 WARN_ON_ONCE(info->si_code < 0);
3057 info->si_signo = sig;
3059 return do_send_specific(tgid, pid, sig, info);
3062 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3063 siginfo_t __user *, uinfo)
3067 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3070 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3073 #ifdef CONFIG_COMPAT
3074 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3078 struct compat_siginfo __user *, uinfo)
3082 if (copy_siginfo_from_user32(&info, uinfo))
3084 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3088 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3090 struct task_struct *t = current;
3091 struct k_sigaction *k;
3094 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3097 k = &t->sighand->action[sig-1];
3099 spin_lock_irq(&current->sighand->siglock);
3104 sigdelsetmask(&act->sa.sa_mask,
3105 sigmask(SIGKILL) | sigmask(SIGSTOP));
3109 * "Setting a signal action to SIG_IGN for a signal that is
3110 * pending shall cause the pending signal to be discarded,
3111 * whether or not it is blocked."
3113 * "Setting a signal action to SIG_DFL for a signal that is
3114 * pending and whose default action is to ignore the signal
3115 * (for example, SIGCHLD), shall cause the pending signal to
3116 * be discarded, whether or not it is blocked"
3118 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
3120 sigaddset(&mask, sig);
3121 rm_from_queue_full(&mask, &t->signal->shared_pending);
3123 rm_from_queue_full(&mask, &t->pending);
3125 } while (t != current);
3129 spin_unlock_irq(&current->sighand->siglock);
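/*
 * Editorial example, not part of signal.c: the POSIX text quoted above
 * in action.  A blocked, pending SIGUSR1 is discarded the moment its
 * action becomes SIG_IGN, which is what rm_from_queue_full() does.
 */
#include <stdio.h>
#include <signal.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);

	sigpending(&pending);
	printf("before SIG_IGN: pending=%d\n",
	       sigismember(&pending, SIGUSR1));

	signal(SIGUSR1, SIG_IGN);	/* discards the pending signal */
	sigpending(&pending);
	printf("after  SIG_IGN: pending=%d\n",
	       sigismember(&pending, SIGUSR1));
	return 0;
}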
3134 do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3139 oss.ss_sp = (void __user *) current->sas_ss_sp;
3140 oss.ss_size = current->sas_ss_size;
3141 oss.ss_flags = sas_ss_flags(sp);
3149 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3151 error = __get_user(ss_sp, &uss->ss_sp) |
3152 __get_user(ss_flags, &uss->ss_flags) |
3153 __get_user(ss_size, &uss->ss_size);
3158 if (on_sig_stack(sp))
3163 * Note - this code used to test ss_flags incorrectly:
3164 * old code may have been written using ss_flags==0
3165 * to mean ss_flags==SS_ONSTACK (as this was the only
3166 * way that worked) - this fix preserves that older
3169 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3172 if (ss_flags == SS_DISABLE) {
3177 if (ss_size < MINSIGSTKSZ)
3181 current->sas_ss_sp = (unsigned long) ss_sp;
3182 current->sas_ss_size = ss_size;
3188 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3190 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3191 __put_user(oss.ss_size, &uoss->ss_size) |
3192 __put_user(oss.ss_flags, &uoss->ss_flags);
3198 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3200 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3203 int restore_altstack(const stack_t __user *uss)
3205 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3206 /* squash all but -EFAULT for now */
3207 return err == -EFAULT ? err : 0;
3210 int __save_altstack(stack_t __user *uss, unsigned long sp)
3212 struct task_struct *t = current;
3213 return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3214 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3215 __put_user(t->sas_ss_size, &uss->ss_size);
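/*
 * Editorial example, not part of signal.c: setting up the alternate
 * stack managed by do_sigaltstack() above, then taking a signal on it
 * via SA_ONSTACK.  This is how SIGSEGV from stack overflow stays
 * catchable.  Sketch; error handling omitted.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>

static void on_alt(int sig)
{
	stack_t cur;
	sigaltstack(NULL, &cur);	/* query only */
	printf("handler on alt stack: %s\n",
	       (cur.ss_flags & SS_ONSTACK) ? "yes" : "no");
}

int main(void)
{
	stack_t ss;
	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alt;
	sa.sa_flags = SA_ONSTACK;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	free(ss.ss_sp);
	return 0;
}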
3218 #ifdef CONFIG_COMPAT
3219 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3220 const compat_stack_t __user *, uss_ptr,
3221 compat_stack_t __user *, uoss_ptr)
3228 compat_stack_t uss32;
3230 memset(&uss, 0, sizeof(stack_t));
3231 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3233 uss.ss_sp = compat_ptr(uss32.ss_sp);
3234 uss.ss_flags = uss32.ss_flags;
3235 uss.ss_size = uss32.ss_size;
3239 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3240 (stack_t __force __user *) &uoss,
3241 compat_user_stack_pointer());
3243 if (ret >= 0 && uoss_ptr) {
3244 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3245 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3246 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3247 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3253 int compat_restore_altstack(const compat_stack_t __user *uss)
3255 int err = compat_sys_sigaltstack(uss, NULL);
3256 /* squash all but -EFAULT for now */
3257 return err == -EFAULT ? err : 0;
3260 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3262 struct task_struct *t = current;
3263 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3264 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3265 __put_user(t->sas_ss_size, &uss->ss_size);
3269 #ifdef __ARCH_WANT_SYS_SIGPENDING
3272 * sys_sigpending - examine pending signals
3273 * @set: where the mask of pending signals is returned
3275 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3277 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3282 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3284 * sys_sigprocmask - examine and change blocked signals
3285 * @how: whether to add, remove, or set signals
3286 * @nset: signals to add or remove (if non-null)
3287 * @oset: previous value of signal mask if non-null
3289 * Some platforms have their own version with special arguments;
3290 * others support only sys_rt_sigprocmask.
3293 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3294 old_sigset_t __user *, oset)
3296 old_sigset_t old_set, new_set;
3297 sigset_t new_blocked;
3299 old_set = current->blocked.sig[0];
3302 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3305 new_blocked = current->blocked;
3309 sigaddsetmask(&new_blocked, new_set);
3312 sigdelsetmask(&new_blocked, new_set);
3315 new_blocked.sig[0] = new_set;
3321 set_current_blocked(&new_blocked);
3325 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3331 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3333 #ifndef CONFIG_ODD_RT_SIGACTION
3335 * sys_rt_sigaction - alter an action taken by a process
3336 * @sig: signal whose action is to be changed
3337 * @act: new sigaction
3338 * @oact: used to save the previous sigaction
3339 * @sigsetsize: size of sigset_t type
3341 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3342 const struct sigaction __user *, act,
3343 struct sigaction __user *, oact,
3346 struct k_sigaction new_sa, old_sa;
3349 /* XXX: Don't preclude handling different sized sigset_t's. */
3350 if (sigsetsize != sizeof(sigset_t))
3354 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3358 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3361 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3367 #ifdef CONFIG_COMPAT
3368 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3369 const struct compat_sigaction __user *, act,
3370 struct compat_sigaction __user *, oact,
3371 compat_size_t, sigsetsize)
3373 struct k_sigaction new_ka, old_ka;
3374 compat_sigset_t mask;
3375 #ifdef __ARCH_HAS_SA_RESTORER
3376 compat_uptr_t restorer;
3380 /* XXX: Don't preclude handling different sized sigset_t's. */
3381 if (sigsetsize != sizeof(compat_sigset_t))
3385 compat_uptr_t handler;
3386 ret = get_user(handler, &act->sa_handler);
3387 new_ka.sa.sa_handler = compat_ptr(handler);
3388 #ifdef __ARCH_HAS_SA_RESTORER
3389 ret |= get_user(restorer, &act->sa_restorer);
3390 new_ka.sa.sa_restorer = compat_ptr(restorer);
3392 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3393 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
3396 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3399 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3401 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3402 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3404 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3405 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3406 #ifdef __ARCH_HAS_SA_RESTORER
3407 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3408 &oact->sa_restorer);
3414 #endif /* !CONFIG_ODD_RT_SIGACTION */
3416 #ifdef CONFIG_OLD_SIGACTION
3417 SYSCALL_DEFINE3(sigaction, int, sig,
3418 const struct old_sigaction __user *, act,
3419 struct old_sigaction __user *, oact)
3421 struct k_sigaction new_ka, old_ka;
3426 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3427 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3428 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3429 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3430 __get_user(mask, &act->sa_mask))
3432 #ifdef __ARCH_HAS_KA_RESTORER
3433 new_ka.ka_restorer = NULL;
3435 siginitset(&new_ka.sa.sa_mask, mask);
3438 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3441 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3442 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3443 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3444 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3445 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3452 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3453 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3454 const struct compat_old_sigaction __user *, act,
3455 struct compat_old_sigaction __user *, oact)
3457 struct k_sigaction new_ka, old_ka;
3459 compat_old_sigset_t mask;
3460 compat_uptr_t handler, restorer;
3463 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3464 __get_user(handler, &act->sa_handler) ||
3465 __get_user(restorer, &act->sa_restorer) ||
3466 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3467 __get_user(mask, &act->sa_mask))
3470 #ifdef __ARCH_HAS_KA_RESTORER
3471 new_ka.ka_restorer = NULL;
3473 new_ka.sa.sa_handler = compat_ptr(handler);
3474 new_ka.sa.sa_restorer = compat_ptr(restorer);
3475 siginitset(&new_ka.sa.sa_mask, mask);
3478 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3481 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3482 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3483 &oact->sa_handler) ||
3484 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3485 &oact->sa_restorer) ||
3486 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3487 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3494 #ifdef __ARCH_WANT_SYS_SGETMASK
3497 * For backwards compatibility. Functionality superseded by sigprocmask.
3499 SYSCALL_DEFINE0(sgetmask)
3502 return current->blocked.sig[0];
3505 SYSCALL_DEFINE1(ssetmask, int, newmask)
3507 int old = current->blocked.sig[0];
3510 siginitset(&newset, newmask);
3511 set_current_blocked(&newset);
3515 #endif /* __ARCH_WANT_SYS_SGETMASK */
3517 #ifdef __ARCH_WANT_SYS_SIGNAL
3519 * For backwards compatibility. Functionality superseded by sigaction.
3521 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3523 struct k_sigaction new_sa, old_sa;
3526 new_sa.sa.sa_handler = handler;
3527 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3528 sigemptyset(&new_sa.sa.sa_mask);
3530 ret = do_sigaction(sig, &new_sa, &old_sa);
3532 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3534 #endif /* __ARCH_WANT_SYS_SIGNAL */
3536 #ifdef __ARCH_WANT_SYS_PAUSE
3538 SYSCALL_DEFINE0(pause)
3540 while (!signal_pending(current)) {
3541 current->state = TASK_INTERRUPTIBLE;
3544 return -ERESTARTNOHAND;
3549 int sigsuspend(sigset_t *set)
3551 current->saved_sigmask = current->blocked;
3552 set_current_blocked(set);
3554 current->state = TASK_INTERRUPTIBLE;
3556 set_restore_sigmask();
3557 return -ERESTARTNOHAND;
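/*
 * Editorial example, not part of signal.c: the classic race-free wait
 * that sigsuspend() above exists for.  Checking a flag and then calling
 * pause() can lose a signal that arrives in between; atomically
 * swapping the mask inside sigsuspend(2) cannot.
 */
#include <stdio.h>
#include <string.h>
#include <signal.h>

static volatile sig_atomic_t got_signal;

static void mark(int sig)
{
	got_signal = 1;
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = mark;
	sigaction(SIGUSR1, &sa, NULL);

	sigset_t block, old;
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	raise(SIGUSR1);		/* may arrive before we would pause() */

	while (!got_signal)
		sigsuspend(&old);	/* unblock + sleep, atomically */

	printf("woke without losing the signal\n");
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}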
3561 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3562 * value until a signal is received
3563 * @unewset: new signal mask value
3564 * @sigsetsize: size of sigset_t type
3566 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3570 /* XXX: Don't preclude handling different sized sigset_t's. */
3571 if (sigsetsize != sizeof(sigset_t))
3574 if (copy_from_user(&newset, unewset, sizeof(newset)))
3576 return sigsuspend(&newset);
3579 #ifdef CONFIG_COMPAT
3580 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3584 compat_sigset_t newset32;
3586 /* XXX: Don't preclude handling different sized sigset_t's. */
3587 if (sigsetsize != sizeof(sigset_t))
3590 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3592 sigset_from_compat(&newset, &newset32);
3593 return sigsuspend(&newset);
3595 /* on little-endian bitmaps don't care about granularity */
3596 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3601 #ifdef CONFIG_OLD_SIGSUSPEND
3602 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3605 siginitset(&blocked, mask);
3606 return sigsuspend(&blocked);
3609 #ifdef CONFIG_OLD_SIGSUSPEND3
3610 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3613 siginitset(&blocked, mask);
3614 return sigsuspend(&blocked);
3618 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3623 void __init signals_init(void)
3625 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3628 #ifdef CONFIG_KGDB_KDB
3629 #include <linux/kdb.h>
3631 * kdb_send_sig_info - Allows kdb to send signals without exposing
3632 * signal internals. This function checks if the required locks are
3633 * available before calling the main signal code, to avoid kdb
3637 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3639 static struct task_struct *kdb_prev_t;
3641 if (!spin_trylock(&t->sighand->siglock)) {
3642 kdb_printf("Can't do kill command now.\n"
3643 "The sigmask lock is held somewhere else in "
3644 "kernel, try again later\n");
3647 spin_unlock(&t->sighand->siglock);
3648 new_t = kdb_prev_t != t;
3650 if (t->state != TASK_RUNNING && new_t) {
3651 kdb_printf("Process is not RUNNING, sending a signal from "
3652 "kdb risks deadlock\n"
3653 "on the run queue locks. "
3654 "The signal has _not_ been sent.\n"
3655 "Reissue the kill command if you want to risk "
3659 sig = info->si_signo;
3660 if (send_sig_info(sig, info, t))
3661 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3664 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3666 #endif /* CONFIG_KGDB_KDB */