/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
        return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
        /* Is it explicitly or implicitly ignored? */
        return handler == SIG_IGN ||
                (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
        void __user *handler;

        handler = sig_handler(t, sig);

        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
                        handler == SIG_DFL && !force)
                return 1;

        return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
        /*
         * Blocked signals are never ignored, since the
         * signal handler may change by the time it is
         * unblocked.
         */
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;

        if (!sig_task_ignored(t, sig, force))
                return 0;

        /*
         * Tracers may want to know about even ignored signals.
         */
        return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
        if ((t->jobctl & JOBCTL_PENDING_MASK) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
                return 1;
        }
        /*
         * We must never clear the flag in another thread, or in current
         * when it's possible the current syscall is returning -ERESTART*.
         * So we don't clear it here, and only callers who know they should do.
         */
        return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
        if (recalc_sigpending_tsk(t))
                signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
        if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
}
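
/*
 * Illustrative sketch (not part of this file): callers that edit
 * current->blocked under the siglock are expected to recompute the
 * pending state afterwards, e.g. the core of a sigprocmask()-style
 * update looks roughly like:
 *
 *      spin_lock_irq(&current->sighand->siglock);
 *      current->blocked = newset;
 *      recalc_sigpending();
 *      spin_unlock_irq(&current->sighand->siglock);
 *
 * The kernel's set_current_blocked() helper normally performs this
 * dance; the snippet only shows why recalc_sigpending() is safe to
 * call on current but never clears TIF_SIGPENDING for other threads.
 */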
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
         sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
        unsigned long i, *s, *m, x;
        int sig = 0;

        s = pending->signal.sig;
        m = mask->sig;

        /*
         * Handle the first word specially: it contains the
         * synchronous signals that need to be dequeued first.
         */
        x = *s &~ *m;
        if (x) {
                if (x & SYNCHRONOUS_MASK)
                        x &= SYNCHRONOUS_MASK;
                sig = ffz(~x) + 1;
                return sig;
        }

        switch (_NSIG_WORDS) {
        default:
                for (i = 1; i < _NSIG_WORDS; ++i) {
                        x = *++s &~ *++m;
                        if (!x)
                                continue;
                        sig = ffz(~x) + i*_NSIG_BPW + 1;
                        break;
                }
                break;

        case 2:
                x = s[1] &~ m[1];
                if (!x)
                        break;
                sig = ffz(~x) + _NSIG_BPW + 1;
                break;

        case 1:
                /* Nothing to do */
                break;
        }

        return sig;
}
static inline void print_dropped_signal(int sig)
{
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

        if (!print_fatal_signals)
                return;

        if (!__ratelimit(&ratelimit_state))
                return;

        printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
               current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
        BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
                        JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
        BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

        if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
                return false;

        if (mask & JOBCTL_STOP_SIGMASK)
                task->jobctl &= ~JOBCTL_STOP_SIGMASK;

        task->jobctl |= mask;
        return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
        if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
                task->jobctl &= ~JOBCTL_TRAPPING;
                smp_mb();	/* advised by wake_up_bit() */
                wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
        }
}
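
/*
 * Illustrative sketch (assumed counterpart, not code from this file):
 * the ptracer side blocks on the same bit with the matching
 * wait_on_bit() API, roughly:
 *
 *      wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
 *                  TASK_UNINTERRUPTIBLE);
 *
 * which is why the smp_mb() above must order the bit clear before
 * wake_up_bit() inspects the waitqueue.
 */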
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
        BUG_ON(mask & ~JOBCTL_PENDING_MASK);

        if (mask & JOBCTL_STOP_PENDING)
                mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

        task->jobctl &= ~mask;

        if (!(task->jobctl & JOBCTL_PENDING_MASK))
                task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
        struct signal_struct *sig = task->signal;
        bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

        WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

        task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

        if (!consume)
                return false;

        if (!WARN_ON_ONCE(sig->group_stop_count == 0))
                sig->group_stop_count--;

        /*
         * Tell the caller to notify completion iff we are entering into a
         * fresh group stop.  Read comment in do_signal_stop() for details.
         */
        if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
                sig->flags = SIGNAL_STOP_STOPPED;
                return true;
        }
        return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
        struct sigqueue *q = NULL;
        struct user_struct *user;

        /*
         * Protect access to @t credentials. This can go away when all
         * callers hold rcu read lock.
         */
        rcu_read_lock();
        user = get_uid(__task_cred(t)->user);
        atomic_inc(&user->sigpending);
        rcu_read_unlock();

        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
                        task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
        }

        if (unlikely(q == NULL)) {
                atomic_dec(&user->sigpending);
                free_uid(user);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->user = user;
        }

        return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
        atomic_dec(&q->user->sigpending);
        free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
        struct sigqueue *q;

        sigemptyset(&queue->signal);
        while (!list_empty(&queue->list)) {
                q = list_entry(queue->list.next, struct sigqueue, list);
                list_del_init(&q->list);
                __sigqueue_free(q);
        }
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t, TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
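
/*
 * Illustrative sketch (not from this file): a kthread that opted in to
 * a signal typically drains it like this:
 *
 *      allow_signal(SIGTERM);
 *      ...
 *      if (signal_pending(current))
 *              flush_signals(current);
 *
 * allow_signal() is assumed from elsewhere in the kernel; the point is
 * that flush_signals() is for kthreads, which never return to
 * userspace to consume what is pending.
 */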
static void __flush_itimer_signals(struct sigpending *pending)
{
        sigset_t signal, retain;
        struct sigqueue *q, *n;

        signal = pending->signal;
        sigemptyset(&retain);

        list_for_each_entry_safe(q, n, &pending->list, list) {
                int sig = q->info.si_signo;

                if (likely(q->info.si_code != SI_TIMER)) {
                        sigaddset(&retain, sig);
                } else {
                        sigdelset(&signal, sig);
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }

        sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
        struct task_struct *tsk = current;
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        __flush_itimer_signals(&tsk->pending);
        __flush_itimer_signals(&tsk->signal->shared_pending);
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
        int i;

        for (i = 0; i < _NSIG; ++i)
                t->sighand->action[i].sa.sa_handler = SIG_IGN;

        flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
        int i;
        struct k_sigaction *ka = &t->sighand->action[0];
        for (i = _NSIG ; i != 0 ; i--) {
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
                ka->sa.sa_restorer = NULL;
#endif
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
}
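
/*
 * Illustrative sketch (assumed call site, not code from this file):
 * the exec path resets handlers so the new image cannot be entered at
 * a handler address belonging to the old one, roughly:
 *
 *      flush_signal_handlers(current, 0);
 *
 * force_default is 0 there because POSIX lets ignored dispositions
 * (SIG_IGN) survive execve().
 */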
int unhandled_signal(struct task_struct *tsk, int sig)
{
        void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
        if (is_global_init(tsk))
                return 1;
        if (handler != SIG_IGN && handler != SIG_DFL)
                return 0;
        /* if ptraced, let the tracer determine */
        return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        struct sigqueue *q, *first = NULL;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
         */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first)
                                goto still_pending;
                        first = q;
                }
        }

        sigdelset(&list->signal, sig);

        if (first) {
still_pending:
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
        } else {
                /*
                 * Ok, it wasn't in the queue.  This must be
                 * a fast-pathed signal or we must have been
                 * out of queue space.  So zero out the info.
                 */
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = SI_USER;
                info->si_pid = 0;
                info->si_uid = 0;
        }
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        siginfo_t *info)
{
        int sig = next_signal(pending, mask);

        if (sig)
                collect_signal(sig, pending, info);
        return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        int signr;

        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
        signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr) {
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
                /*
                 * itimers are process shared and we restart periodic
                 * itimers in the signal delivery path to prevent DoS
                 * attacks in the high resolution timer case. This is
                 * compliant with the old way of self-restarting
                 * itimers, as the SIGALRM is a legacy signal and only
                 * queued once. Changing the restart behaviour to
                 * restart the timer in the signal dequeue path is
                 * reducing the timer noise on heavy loaded !highres
                 * systems too.
                 */
                if (unlikely(signr == SIGALRM)) {
                        struct hrtimer *tmr = &tsk->signal->real_timer;

                        if (!hrtimer_is_queued(tmr) &&
                            tsk->signal->it_real_incr.tv64 != 0) {
                                hrtimer_forward(tmr, tmr->base->get_time(),
                                                tsk->signal->it_real_incr);
                                hrtimer_restart(tmr);
                        }
                }
        }

        recalc_sigpending();
        if (!signr)
                return 0;

        if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                current->jobctl |= JOBCTL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}
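
/*
 * Illustrative sketch (not from this file): a caller draining its own
 * queue follows the locking rule stated above, e.g.:
 *
 *      siginfo_t info;
 *      int signr;
 *
 *      spin_lock_irq(&current->sighand->siglock);
 *      signr = dequeue_signal(current, &current->blocked, &info);
 *      spin_unlock_irq(&current->sighand->siglock);
 *
 * Treat this as the usage pattern, not as a claim about any particular
 * wrapper API.
 */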
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
        set_tsk_thread_flag(t, TIF_SIGPENDING);
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
        struct sigqueue *q, *n;
        sigset_t m;

        sigandsets(&m, mask, &s->signal);
        if (sigisemptyset(&m))
                return 0;

        sigandnsets(&s->signal, &s->signal, mask);
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (sigismember(mask, q->info.si_signo)) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
        return info <= SEND_SIG_FORCED;
}
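
/*
 * Illustrative note (values assumed from <linux/sched.h> of this era):
 * the "special" siginfo sentinels are small casted constants,
 *
 *      #define SEND_SIG_NOINFO ((struct siginfo *) 0)
 *      #define SEND_SIG_PRIV   ((struct siginfo *) 1)
 *      #define SEND_SIG_FORCED ((struct siginfo *) 2)
 *
 * so the pointer comparison above is really a three-value range check.
 */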
static inline bool si_fromuser(const struct siginfo *info)
{
        return info == SEND_SIG_NOINFO ||
                (!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
        const struct cred *cred = current_cred();
        const struct cred *tcred = __task_cred(t);

        if (uid_eq(cred->euid, tcred->suid) ||
            uid_eq(cred->euid, tcred->uid) ||
            uid_eq(cred->uid, tcred->suid) ||
            uid_eq(cred->uid, tcred->uid))
                return 1;

        if (ns_capable(tcred->user_ns, CAP_KILL))
                return 1;

        return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        struct pid *sid;
        int error;

        if (!valid_signal(sig))
                return -EINVAL;

        if (!si_fromuser(info))
                return 0;

        error = audit_signal_info(sig, t); /* Let audit system see the signal */
        if (error)
                return error;

        if (!same_thread_group(current, t) &&
            !kill_ok_by_cred(t)) {
                switch (sig) {
                case SIGCONT:
                        sid = task_session(t);
                        /*
                         * We don't return the error if sid == NULL. The
                         * task was unhashed, the caller must notice this.
                         */
                        if (!sid || sid == task_session(current))
                                break;
                default:
                        return -EPERM;
                }
        }

        return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
        WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
        assert_spin_locked(&t->sighand->siglock);

        task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
        ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;
        sigset_t flush;

        if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
                if (!(signal->flags & SIGNAL_GROUP_EXIT))
                        return sig == SIGKILL;
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                siginitset(&flush, sigmask(SIGCONT));
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t)
                        flush_sigqueue_mask(&flush, &t->pending);
        } else if (sig == SIGCONT) {
                unsigned int why;
                /*
                 * Remove all stop signals from all queues, wake all threads.
                 */
                siginitset(&flush, SIG_KERNEL_STOP_MASK);
                flush_sigqueue_mask(&flush, &signal->shared_pending);
                for_each_thread(p, t) {
                        flush_sigqueue_mask(&flush, &t->pending);
                        task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
                        if (likely(!(t->ptrace & PT_SEIZED)))
                                wake_up_state(t, __TASK_STOPPED);
                        else
                                ptrace_trap_notify(t);
                }

                /*
                 * Notify the parent with CLD_CONTINUED if we were stopped.
                 *
                 * If we were in the middle of a group stop, we pretend it
                 * was already finished, and then continued. Since SIGCHLD
                 * doesn't queue we report only CLD_STOPPED, as if the next
                 * CLD_CONTINUED was dropped.
                 */
                why = 0;
                if (signal->flags & SIGNAL_STOP_STOPPED)
                        why |= SIGNAL_CLD_CONTINUED;
                else if (signal->group_stop_count)
                        why |= SIGNAL_CLD_STOPPED;

                if (why) {
                        /*
                         * The first thread which returns from do_signal_stop()
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
                        signal->flags = why | SIGNAL_STOP_CONTINUED;
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                }
        }

        return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
        if (sigismember(&p->blocked, sig))
                return 0;
        if (p->flags & PF_EXITING)
                return 0;
        if (sig == SIGKILL)
                return 1;
        if (task_is_stopped_or_traced(p))
                return 0;
        return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
        struct signal_struct *signal = p->signal;
        struct task_struct *t;

        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
        if (wants_signal(sig, p))
                t = p;
        else if (!group || thread_group_empty(p))
                /*
                 * There is just one thread and it does not need to be woken.
                 * It will dequeue unblocked signals before it runs again.
                 */
                return;
        else {
                /*
                 * Otherwise try to find a suitable thread.
                 */
                t = signal->curr_target;
                while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == signal->curr_target)
                                /*
                                 * No thread needs to be woken.
                                 * Any eligible threads will see
                                 * the signal in the queue soon.
                                 */
                                return;
                }
                signal->curr_target = t;
        }

        /*
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
        if (sig_fatal(p, sig) &&
            !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !t->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
                if (!sig_kernel_coredump(sig)) {
                        /*
                         * Start a group exit and wake everybody up.
                         * This way we don't have other threads
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
                        signal->flags = SIGNAL_GROUP_EXIT;
                        signal->group_exit_code = sig;
                        signal->group_stop_count = 0;
                        t = p;
                        do {
                                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
                        return;
                }
        }

        /*
         * The signal is already in the shared-pending queue.
         * Tell the chosen thread to wake up and dequeue it.
         */
        signal_wake_up(t, sig == SIGKILL);
        return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
        if (current_user_ns() == task_cred_xxx(t, user_ns))
                return;

        if (SI_FROMKERNEL(info))
                return;

        rcu_read_lock();
        info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
                                        make_kuid(current_user_ns(), info->si_uid));
        rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
        return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group, int from_ancestor_ns)
{
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;
        int ret = 0, result;

        assert_spin_locked(&t->sighand->siglock);

        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t,
                        from_ancestor_ns || (info == SEND_SIG_FORCED)))
                goto ret;

        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
        result = TRACE_SIGNAL_ALREADY_PENDING;
        if (legacy_queue(pending, sig))
                goto ret;

        result = TRACE_SIGNAL_DELIVERED;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
        if (info == SEND_SIG_FORCED)
                goto out_set;

        /*
         * Real-time signals must be queued if sent by sigqueue, or
         * some other real-time mechanism.  It is implementation
         * defined whether kill() does so.  We attempt to do so, on
         * the principle of least surprise, but since kill is not
         * allowed to fail with EAGAIN when low on memory we just
         * make sure at least one signal gets delivered and don't
         * pass on the info struct.
         */
        if (sig < SIGRTMIN)
                override_rlimit = (is_si_special(info) || info->si_code >= 0);
        else
                override_rlimit = 0;

        q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
                override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_tgid_nr_ns(current,
                                                        task_active_pid_ns(t));
                        q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
                        q->info.si_pid = 0;
                        q->info.si_uid = 0;
                        break;
                default:
                        copy_siginfo(&q->info, info);
                        if (from_ancestor_ns)
                                q->info.si_pid = 0;
                        break;
                }

                userns_fixup_signal_uid(&q->info, t);

        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
                         * Queue overflow, abort.  We may abort if the
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
                        result = TRACE_SIGNAL_OVERFLOW_FAIL;
                        ret = -EAGAIN;
                        goto ret;
                } else {
                        /*
                         * This is a silent loss of information.  We still
                         * send the signal, but the *info bits are lost.
                         */
                        result = TRACE_SIGNAL_LOSE_INFO;
                }
        }

out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
ret:
        trace_signal_generate(sig, info, t, group, result);
        return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group)
{
        int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
        from_ancestor_ns = si_fromuser(info) &&
                           !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

        return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
        struct pt_regs *regs = signal_pt_regs();
        printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
        printk(KERN_INFO "code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;

                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
                                break;
                        printk(KERN_CONT "%02x ", insn);
                }
        }
        printk(KERN_CONT "\n");
#endif
        preempt_disable();
        show_regs(regs);
        preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
        get_option(&str, &print_fatal_signals);

        return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
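
/*
 * Illustrative usage (boot command line):
 *
 *      print-fatal-signals=1
 *
 * enables the dump above for every fatal signal. The same knob is
 * normally also exposed at runtime as
 * /proc/sys/kernel/print-fatal-signals (registered in kernel/sysctl.c,
 * not here).
 */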
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
                        bool group)
{
        unsigned long flags;
        int ret = -ESRCH;

        if (lock_task_sighand(p, &flags)) {
                ret = send_signal(sig, info, p, group);
                unlock_task_sighand(p, &flags);
        }

        return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
        if (blocked || ignored) {
                action->sa.sa_handler = SIG_DFL;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
                }
        }
        if (action->sa.sa_handler == SIG_DFL)
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);

        return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
        struct task_struct *t = p;
        int count = 0;

        p->signal->group_stop_count = 0;

        while_each_thread(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                count++;

                /* Don't bother with already dead threads */
                if (t->exit_state)
                        continue;
                sigaddset(&t->pending.signal, SIGKILL);
                signal_wake_up(t, 1);
        }

        return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
{
        struct sighand_struct *sighand;

        for (;;) {
                /*
                 * Disable interrupts early to avoid deadlocks.
                 * See rcu_read_unlock() comment header for details.
                 */
                local_irq_save(*flags);
                rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL)) {
                        rcu_read_unlock();
                        local_irq_restore(*flags);
                        break;
                }
                /*
                 * This sighand can be already freed and even reused, but
                 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
                 * initializes ->siglock: this slab can't go away, it has
                 * the same object type, ->siglock can't be reinitialized.
                 *
                 * We need to ensure that tsk->sighand is still the same
                 * after we take the lock, we can race with de_thread() or
                 * __exit_signal(). In the latter case the next iteration
                 * must see ->sighand == NULL.
                 */
                spin_lock(&sighand->siglock);
                if (likely(sighand == tsk->sighand)) {
                        rcu_read_unlock();
                        break;
                }
                spin_unlock(&sighand->siglock);
                rcu_read_unlock();
                local_irq_restore(*flags);
        }

        return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        int ret;

        rcu_read_lock();
        ret = check_kill_permission(sig, info, p);
        rcu_read_unlock();

        if (!ret && sig)
                ret = do_send_sig_info(sig, info, p, true);

        return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
        struct task_struct *p = NULL;
        int retval, success;

        success = 0;
        retval = -ESRCH;
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                int err = group_send_sig_info(sig, info, p);
                success |= !err;
                retval = err;
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
        int error = -ESRCH;
        struct task_struct *p;

        for (;;) {
                rcu_read_lock();
                p = pid_task(pid, PIDTYPE_PID);
                if (p)
                        error = group_send_sig_info(sig, info, p);
                rcu_read_unlock();
                if (likely(!p || error != -ESRCH))
                        return error;

                /*
                 * The task was unhashed in between, try again.  If it
                 * is dead, pid_task() will return NULL, if we race with
                 * de_thread() it will find the new leader.
                 */
        }
}
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
        int error;

        rcu_read_lock();
        error = kill_pid_info(sig, info, find_vpid(pid));
        rcu_read_unlock();
        return error;
}

static int kill_as_cred_perm(const struct cred *cred,
                             struct task_struct *target)
{
        const struct cred *pcred = __task_cred(target);
        if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
            !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
                return 0;
        return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
                         const struct cred *cred, u32 secid)
{
        int ret = -EINVAL;
        struct task_struct *p;
        unsigned long flags;

        if (!valid_signal(sig))
                return ret;

        rcu_read_lock();
        p = pid_task(pid, PIDTYPE_PID);
        if (!p) {
                ret = -ESRCH;
                goto out_unlock;
        }
        if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = security_task_kill(p, info, sig, secid);
        if (ret)
                goto out_unlock;

        if (sig) {
                if (lock_task_sighand(p, &flags)) {
                        ret = __send_signal(sig, info, p, 1, 0);
                        unlock_task_sighand(p, &flags);
                } else
                        ret = -ESRCH;
        }
out_unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
        int ret;

        if (pid > 0) {
                rcu_read_lock();
                ret = kill_pid_info(sig, info, find_vpid(pid));
                rcu_read_unlock();
                return ret;
        }

        read_lock(&tasklist_lock);
        if (pid != -1) {
                ret = __kill_pgrp_info(sig, info,
                                pid ? find_vpid(-pid) : task_pgrp(current));
        } else {
                int retval = 0, count = 0;
                struct task_struct *p;

                for_each_process(p) {
                        if (task_pid_vnr(p) > 1 &&
                                        !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
                                        retval = err;
                        }
                }
                ret = count ? retval : -ESRCH;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
        /*
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
        if (!valid_signal(sig))
                return -EINVAL;

        return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
        ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
        return send_sig_info(sig, __si_special(priv), p);
}
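
/*
 * Illustrative sketch (not from this file): in-kernel callers pick
 * @priv depending on whether the signal should carry kernel-origin
 * or user-style siginfo:
 *
 *      send_sig(SIGKILL, task, 1);     - kernel-originated (SI_KERNEL)
 *      send_sig(SIGIO, task, 0);       - user-style (SI_USER)
 *
 * Both resolve to do_send_sig_info() with group == false, i.e. they
 * target this task, not its whole thread group.
 */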
void
force_sig(int sig, struct task_struct *p)
{
        force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
        if (sig == SIGSEGV) {
                unsigned long flags;

                spin_lock_irqsave(&p->sighand->siglock, flags);
                p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
        }
        force_sig(SIGSEGV, p);
        return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
        int ret;

        read_lock(&tasklist_lock);
        ret = __kill_pgrp_info(sig, __si_special(priv), pid);
        read_unlock(&tasklist_lock);

        return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
        return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
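
/*
 * Illustrative sketch (helpers assumed from <linux/pid.h>, not code
 * from this file): a driver holding a struct pid reference can signal
 * the owner without ever touching a task pointer:
 *
 *      struct pid *owner = get_task_pid(current, PIDTYPE_PID);
 *      ...
 *      kill_pid(owner, SIGIO, 1);
 *      put_pid(owner);
 *
 * Holding a struct pid instead of a task_struct avoids pid-reuse races
 * if the task exits in between.
 */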
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
        struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

        if (q)
                q->flags |= SIGQUEUE_PREALLOC;

        return q;
}

void sigqueue_free(struct sigqueue *q)
{
        unsigned long flags;
        spinlock_t *lock = &current->sighand->siglock;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
        /*
         * We must hold ->siglock while testing q->list
         * to serialize with collect_signal() or with
         * __exit_signal()->flush_sigqueue().
         */
        spin_lock_irqsave(lock, flags);
        q->flags &= ~SIGQUEUE_PREALLOC;
        /*
         * If it is queued it will be freed when dequeued,
         * like the "regular" sigqueue.
         */
        if (!list_empty(&q->list))
                q = NULL;
        spin_unlock_irqrestore(lock, flags);

        if (q)
                __sigqueue_free(q);
}
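
/*
 * Illustrative lifecycle (a sketch of what the POSIX timer code is
 * described to do above, not code from this file):
 *
 *      q = sigqueue_alloc();           - at timer_create(), may fail: EAGAIN
 *      ...
 *      send_sigqueue(q, tsk, group);   - on each expiry, never allocates
 *      ...
 *      sigqueue_free(q);               - at timer deletion
 *
 * Preallocation is the point: expiry runs in contexts where a failed
 * atomic allocation would silently drop the notification.
 */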
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
        int ret, result;

        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

        ret = -1;
        if (!likely(lock_task_sighand(t, &flags)))
                goto ret;

        ret = 1; /* the signal is ignored */
        result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t, false))
                goto out;

        ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued just increment
                 * the overrun count.
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                result = TRACE_SIGNAL_ALREADY_PENDING;
                goto out;
        }
        q->info.si_overrun = 0;

        signalfd_notify(t, sig);
        pending = group ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
        result = TRACE_SIGNAL_DELIVERED;
out:
        trace_signal_generate(sig, &q->info, t, group, result);
        unlock_task_sighand(t, &flags);
ret:
        return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
        bool autoreap = false;
        cputime_t utime, stime;

        BUG_ON(sig == -1);

        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(task_is_stopped_or_traced(tsk));

        BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));

        if (sig != SIGCHLD) {
                /*
                 * This is only possible if parent == real_parent.
                 * Check if it has changed security domain.
                 */
                if (tsk->parent_exec_id != tsk->parent->self_exec_id)
                        sig = SIGCHLD;
        }

        info.si_signo = sig;
        info.si_errno = 0;
        /*
         * We are under tasklist_lock here so our parent is tied to
         * us and cannot change.
         *
         * task_active_pid_ns will always return the same pid namespace
         * until a task passes through release_task.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg, this is not
         * correct to rely on this
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
        info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
                                       task_uid(tsk));
        rcu_read_unlock();

        task_cputime(tsk, &utime, &stime);
        info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
        info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
                info.si_code = CLD_DUMPED;
        else if (tsk->exit_code & 0x7f)
                info.si_code = CLD_KILLED;
        else {
                info.si_code = CLD_EXITED;
                info.si_status = tsk->exit_code >> 8;
        }

        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
        if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
                 * We are exiting and our parent doesn't care.  POSIX.1
                 * defines special semantics for setting SIGCHLD to SIG_IGN
                 * or setting the SA_NOCLDWAIT flag: we should be reaped
                 * automatically and not left for our parent's wait4 call.
                 * Rather than having the parent do it as a magic kind of
                 * signal handler, we just set this to tell do_exit that we
                 * can be cleaned up without becoming a zombie.  Note that
                 * we still call __wake_up_parent in this case, because a
                 * blocked sys_wait4 might now return -ECHILD.
                 *
                 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
                autoreap = true;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
        if (valid_signal(sig) && sig)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);

        return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
                                     bool for_ptracer, int why)
{
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;
        cputime_t utime, stime;

        if (for_ptracer) {
                parent = tsk->parent;
        } else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }

        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        /*
         * see comment in do_notify_parent() about the following 4 lines
         */
        rcu_read_lock();
        info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
        info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
        rcu_read_unlock();

        task_cputime(tsk, &utime, &stime);
        info.si_utime = cputime_to_clock_t(utime);
        info.si_stime = cputime_to_clock_t(stime);

        info.si_code = why;
        switch (why) {
        case CLD_CONTINUED:
                info.si_status = SIGCONT;
                break;
        case CLD_STOPPED:
                info.si_status = tsk->signal->group_exit_code & 0x7f;
                break;
        case CLD_TRAPPED:
                info.si_status = tsk->exit_code & 0x7f;
                break;
        default:
                BUG();
        }

        sighand = parent->sighand;
        spin_lock_irqsave(&sighand->siglock, flags);
        if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
            !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
                __group_send_sig_info(SIGCHLD, &info, parent);
        /*
         * Even if SIGCHLD is not generated, we must wake up wait4 calls.
         */
        __wake_up_parent(tsk, parent);
        spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
        if (!likely(current->ptrace))
                return 0;
        /*
         * Are we in the middle of do_coredump?
         * If so and our tracer is also part of the coredump stopping
         * is a deadlock situation, and pointless because our tracer
         * is dead so don't allow us to stop.
         * If SIGKILL was already sent before the caller unlocked
         * ->siglock we must see ->core_state != NULL. Otherwise it
         * is safe to enter schedule().
         *
         * This is almost outdated, a task with the pending SIGKILL can't
         * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
         * after SIGKILL was already dequeued.
         */
        if (unlikely(current->mm->core_state) &&
            unlikely(current->mm == current->parent->mm))
                return 0;

        return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
        return sigismember(&tsk->pending.signal, SIGKILL) ||
                sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
        __releases(&current->sighand->siglock)
        __acquires(&current->sighand->siglock)
{
        bool gstop_done = false;

        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
                 * The arch code has something special to do before a
                 * ptrace stop.  This is allowed to block, e.g. for faults
                 * on user stack pages.  We can't keep the siglock while
                 * calling arch_ptrace_stop, so we must release it now.
                 * To preserve proper semantics, we must do this before
                 * any signal bookkeeping like checking group_stop_count.
                 * Meanwhile, a SIGKILL could come in before we retake the
                 * siglock.  That must prevent us from sleeping in TASK_TRACED.
                 * So after regaining the lock, we must check for SIGKILL.
                 */
                spin_unlock_irq(&current->sighand->siglock);
                arch_ptrace_stop(exit_code, info);
                spin_lock_irq(&current->sighand->siglock);
                if (sigkill_pending(current))
                        return;
        }

        /*
         * We're committing to trapping.  TRACED should be visible before
         * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
         * Also, transition to TRACED and updates to ->jobctl should be
         * atomic with respect to siglock and should be done after the arch
         * hook as siglock is released and regrabbed across it.
         */
        set_current_state(TASK_TRACED);

        current->last_siginfo = info;
        current->exit_code = exit_code;

        /*
         * If @why is CLD_STOPPED, we're trapping to participate in a group
         * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
         * across siglock relocks since INTERRUPT was scheduled, PENDING
         * could be clear now.  We act as if SIGCONT is received after
         * TASK_TRACED is entered - ignore it.
         */
        if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
                gstop_done = task_participate_group_stop(current);

        /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
        task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
        if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
                task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

        /* entering a trap, clear TRAPPING */
        task_clear_jobctl_trapping(current);

        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
        if (may_ptrace_stop()) {
                /*
                 * Notify parents of the stop.
                 *
                 * While ptraced, there are two parents - the ptracer and
                 * the real_parent of the group_leader.  The ptracer should
                 * know about every stop while the real parent is only
                 * interested in the completion of group stop.  The states
                 * for the two don't interact with each other.  Notify
                 * separately unless they're gonna be duplicates.
                 */
                do_notify_parent_cldstop(current, true, why);
                if (gstop_done && ptrace_reparented(current))
                        do_notify_parent_cldstop(current, false, why);

                /*
                 * Don't want to allow preemption here, because
                 * sys_ptrace() needs this task to be inactive.
                 *
                 * XXX: implement read_unlock_no_resched().
                 */
                preempt_disable();
                read_unlock(&tasklist_lock);
                preempt_enable_no_resched();
                freezable_schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
                 * Don't drop the lock yet, another tracer may come.
                 *
                 * If @gstop_done, the ptracer went away between group stop
                 * completion and here.  During detach, it would have set
                 * JOBCTL_STOP_PENDING on us and we'll re-enter
                 * TASK_STOPPED in do_signal_stop() on return, so notifying
                 * the real parent of the group stop completion is enough.
                 */
                if (gstop_done)
                        do_notify_parent_cldstop(current, false, why);

                /* tasklist protects us from ptrace_freeze_traced() */
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
                read_unlock(&tasklist_lock);
        }

        /*
         * We are back.  Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
         * any signal-sending on another CPU that wants to examine it.
         */
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;

        /* LISTENING can be set only during STOP traps, clear it */
        current->jobctl &= ~JOBCTL_LISTENING;

        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
         * This sets TIF_SIGPENDING, but never clears it.
         */
        recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
        siginfo_t info;

        memset(&info, 0, sizeof info);
        info.si_signo = signr;
        info.si_code = exit_code;
        info.si_pid = task_pid_vnr(current);
        info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

        /* Let the debugger run.  */
        ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
        BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
        if (unlikely(current->task_works))
                task_work_run();

        spin_lock_irq(&current->sighand->siglock);
        ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
        spin_unlock_irq(&current->sighand->siglock);
}
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
        __releases(&current->sighand->siglock)
{
        struct signal_struct *sig = current->signal;

        if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
                unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
                struct task_struct *t;

                /* signr will be recorded in task->jobctl for retries */
                WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

                if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
                        return false;
                /*
                 * There is no group stop already in progress.  We must
                 * initiate one now.
                 *
                 * While ptraced, a task may be resumed while group stop is
                 * still in effect and then receive a stop signal and
                 * initiate another group stop.  This deviates from the
                 * usual behavior as two consecutive stop signals can't
                 * cause two group stops when !ptraced.  That is why we
                 * also check !task_is_stopped(t) below.
                 *
                 * The condition can be distinguished by testing whether
                 * SIGNAL_STOP_STOPPED is already set.  Don't generate
                 * group_exit_code in such case.
                 *
                 * This is not necessary for SIGNAL_STOP_CONTINUED because
                 * an intervening stop signal is required to cause two
                 * continued events regardless of ptrace.
                 */
                if (!(sig->flags & SIGNAL_STOP_STOPPED))
                        sig->group_exit_code = signr;

                sig->group_stop_count = 0;

                if (task_set_jobctl_pending(current, signr | gstop))
                        sig->group_stop_count++;

                t = current;
                while_each_thread(current, t) {
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
                        if (!task_is_stopped(t) &&
                            task_set_jobctl_pending(t, signr | gstop)) {
                                sig->group_stop_count++;
                                if (likely(!(t->ptrace & PT_SEIZED)))
                                        signal_wake_up(t, 0);
                                else
                                        ptrace_trap_notify(t);
                        }
                }
        }

        if (likely(!current->ptrace)) {
                int notify = 0;

                /*
                 * If there are no other threads in the group, or if there
                 * is a group stop in progress and we are the last to stop,
                 * report to the parent.
                 */
                if (task_participate_group_stop(current))
                        notify = CLD_STOPPED;

                __set_current_state(TASK_STOPPED);
                spin_unlock_irq(&current->sighand->siglock);

                /*
                 * Notify the parent of the group stop completion.  Because
                 * we're not holding either the siglock or tasklist_lock
                 * here, ptracer may attach inbetween; however, this is for
                 * group stop and should always be delivered to the real
                 * parent of the group leader.  The new ptracer will get
                 * its notification when this task transitions into
                 * TASK_TRACED.
                 */
                if (notify) {
                        read_lock(&tasklist_lock);
                        do_notify_parent_cldstop(current, false, notify);
                        read_unlock(&tasklist_lock);
                }

                /* Now we don't run again until woken by SIGCONT or SIGKILL */
                freezable_schedule();
                return true;
        } else {
                /*
                 * While ptraced, group stop is handled by STOP trap.
                 * Schedule it and let the caller deal with it.
                 */
                task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
                return false;
        }
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
        struct signal_struct *signal = current->signal;
        int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

        if (current->ptrace & PT_SEIZED) {
                if (!signal->group_stop_count &&
                    !(signal->flags & SIGNAL_STOP_STOPPED))
                        signr = SIGTRAP;
                WARN_ON_ONCE(!signr);
                ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
                                 CLD_STOPPED);
        } else {
                WARN_ON_ONCE(!signr);
                ptrace_stop(signr, CLD_STOPPED, 0, NULL);
                current->exit_code = 0;
        }
}
static int ptrace_signal(int signr, siginfo_t *info)
{
        ptrace_signal_deliver();
        /*
         * We do not check sig_kernel_stop(signr) but set this marker
         * unconditionally because we do not know whether debugger will
         * change signr. This flag has no meaning unless we are going
         * to stop after return from ptrace_stop(). In this case it will
         * be checked in do_signal_stop(), we should only stop if it was
         * not cleared by SIGCONT while we were sleeping. See also the
         * comment in dequeue_signal().
         */
        current->jobctl |= JOBCTL_STOP_DEQUEUED;
        ptrace_stop(signr, CLD_TRAPPED, 0, info);

        /* We're back.  Did the debugger cancel the sig?  */
        signr = current->exit_code;
        if (signr == 0)
                return signr;

        current->exit_code = 0;

        /*
         * Update the siginfo structure if the signal has
         * changed.  If the debugger wanted something
         * specific in the siginfo structure then it should
         * have updated *info via PTRACE_SETSIGINFO.
         */
        if (signr != info->si_signo) {
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
                rcu_read_lock();
                info->si_pid = task_pid_vnr(current->parent);
                info->si_uid = from_kuid_munged(current_user_ns(),
                                                task_uid(current->parent));
                rcu_read_unlock();
        }

        /* If the (new) signal is now blocked, requeue it.  */
        if (sigismember(&current->blocked, signr)) {
                specific_send_sig_info(signr, info, current);
                signr = 0;
        }

        return signr;
}
2130 int get_signal(struct ksignal *ksig)
2132 struct sighand_struct *sighand = current->sighand;
2133 struct signal_struct *signal = current->signal;
2136 if (unlikely(current->task_works))
2139 if (unlikely(uprobe_deny_signal()))
2143 * Do this once, we can't return to user-mode if freezing() == T.
2144 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2145 * thus do not need another check after return.
2150 spin_lock_irq(&sighand->siglock);
2152 * Every stopped thread goes here after wakeup. Check to see if
2153 * we should notify the parent, prepare_signal(SIGCONT) encodes
2154 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2156 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2159 if (signal->flags & SIGNAL_CLD_CONTINUED)
2160 why = CLD_CONTINUED;
2164 signal->flags &= ~SIGNAL_CLD_MASK;
2166 spin_unlock_irq(&sighand->siglock);
2169 * Notify the parent that we're continuing. This event is
2170 * always per-process and doesn't make whole lot of sense
2171 * for ptracers, who shouldn't consume the state via
2172 * wait(2) either, but, for backward compatibility, notify
2173 * the ptracer of the group leader too unless it's gonna be
2176 read_lock(&tasklist_lock);
2177 do_notify_parent_cldstop(current, false, why);
2179 if (ptrace_reparented(current->group_leader))
2180 do_notify_parent_cldstop(current->group_leader,
2182 read_unlock(&tasklist_lock);
2188 struct k_sigaction *ka;
2190 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2194 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2196 spin_unlock_irq(&sighand->siglock);
2200 signr = dequeue_signal(current, ¤t->blocked, &ksig->info);
2203 break; /* will return 0 */
2205 if (unlikely(current->ptrace) && signr != SIGKILL) {
2206 signr = ptrace_signal(signr, &ksig->info);
2211 ka = &sighand->action[signr-1];
2213 /* Trace actually delivered signals. */
2214 trace_signal_deliver(signr, &ksig->info, ka);
2216 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2218 if (ka->sa.sa_handler != SIG_DFL) {
2219 /* Run the handler. */
2222 if (ka->sa.sa_flags & SA_ONESHOT)
2223 ka->sa.sa_handler = SIG_DFL;
2225 break; /* will return non-zero "signr" value */
2229 * Now we are doing the default action for this signal.
2231 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2235 * Global init gets no signals it doesn't want.
2236 * Container-init gets no signals it doesn't want from same
2239 * Note that if global/container-init sees a sig_kernel_only()
2240 * signal here, the signal must have been generated internally
2241 * or must have come from an ancestor namespace. In either
2242 * case, the signal cannot be dropped.
2244 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2245 !sig_kernel_only(signr))
		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group. The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works. Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock. */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);
		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise. If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
/**
 * signal_delivered - called after a signal was successfully delivered
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn. So we can
	   simply clear the restore sigmask flag. */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}
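/*
 * Usage sketch (userspace, illustrative): the masking rule above means the
 * delivered signal is blocked inside its own handler unless SA_NODEFER is
 * set, plus whatever the caller put in sa_mask. A minimal demo assuming
 * only POSIX APIs; not built as part of this file:
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void handler(int sig)
{
	sigset_t cur;

	sigprocmask(SIG_BLOCK, NULL, &cur);	/* read the current mask */
	/* printf is not async-signal-safe; acceptable for a demo only */
	printf("SIGUSR1 blocked in handler: %d\n", sigismember(&cur, SIGUSR1));
}

int main(void)
{
	struct sigaction sa = { .sa_handler = handler };

	sigemptyset(&sa.sa_mask);	/* no extra signals in sa_mask */
	sigaction(SIGUSR1, &sa, NULL);	/* no SA_NODEFER: SIGUSR1 blocks itself */
	raise(SIGUSR1);
	return 0;
}
#endif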
void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig, stepping);
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification. This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: new set of blocked signals, if non-null
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
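/*
 * Usage sketch (userspace, illustrative): this entry point backs the POSIX
 * sigprocmask(3) wrapper. A minimal example of blocking SIGINT around a
 * critical section; the function name critical() is hypothetical:
 */
#if 0
#include <signal.h>

int critical(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	if (sigprocmask(SIG_BLOCK, &block, &old))	/* ORed into ->blocked */
		return -1;
	/* ... SIGINT stays pending here instead of being delivered ... */
	return sigprocmask(SIG_SETMASK, &old, NULL);	/* restore old mask */
}
#endif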
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif
static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it. */
	sigandsets(set, &current->blocked, set);
	return 0;
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);

	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}
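/*
 * Usage sketch (userspace, illustrative): sigpending(2) reports signals that
 * were raised while blocked, i.e. the pending & blocked set computed in
 * do_sigpending() above. check_pending() is a hypothetical helper:
 */
#if 0
#include <signal.h>
#include <stdio.h>

void check_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) == 0 && sigismember(&pending, SIGTERM))
		printf("SIGTERM raised while blocked, still pending\n");
}
#endif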
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);

	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is. */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			const struct timespec *ts)
{
	struct task_struct *tsk = current;
	long timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t mask = *which;
	int sig;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_jiffies(ts);
		/*
		 * We can be close to the next tick, add another one
		 * to ensure we will wait at least the time asked for.
		 */
		if (ts->tv_sec || ts->tv_nsec)
			timeout++;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we sleep, so that we'll be awakened when they
		 * arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		timeout = freezable_schedule_timeout_interruptible(timeout);

		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return timeout ? -EINTR : -EAGAIN;
}
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
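/*
 * Usage sketch (userspace, illustrative): synchronous signal handling with
 * sigtimedwait(2). The target signal must already be blocked, otherwise it
 * can be delivered asynchronously instead of being picked up here.
 * wait_for_sigchld() is a hypothetical helper:
 */
#if 0
#include <signal.h>
#include <time.h>

int wait_for_sigchld(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* block first, then wait */
	return sigtimedwait(&set, &info, &ts);	/* -1 with EAGAIN on timeout */
}
#endif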
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
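/*
 * Usage sketch (userspace, illustrative): the null-signal probe described in
 * the comment above. kill(pid, 0) performs only the existence and permission
 * checks; process_exists() is a hypothetical helper:
 */
#if 0
#include <signal.h>
#include <errno.h>

int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists and we may signal it */
	return errno == EPERM;		/* exists but belongs to someone else */
}
#endif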
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group. This
 *  solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
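/*
 * Usage sketch (userspace, illustrative): tgkill has no dedicated wrapper in
 * older C libraries and is usually reached via syscall(2); pthread_kill(3)
 * is the portable equivalent for POSIX threads. my_tgkill() is hypothetical:
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int my_tgkill(pid_t tgid, pid_t tid, int sig)
{
	/* both IDs checked, so a recycled tid in another process is rejected */
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif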
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}
/**
 *  sys_rt_sigqueueinfo - queue a signal and data to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
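/*
 * Usage sketch (userspace, illustrative): sigqueue(3) is the usual way to
 * reach this entry point; it builds a siginfo with si_code = SI_QUEUE and
 * carries a union sigval payload. notify_fd() is a hypothetical helper:
 */
#if 0
#include <signal.h>

int notify_fd(pid_t pid, int fd)
{
	union sigval value = { .sival_int = fd };	/* payload for the peer */

	return sigqueue(pid, SIGRTMIN, value);		/* queued, not coalesced */
}
#endif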
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);

	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;

	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
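/*
 * Usage sketch (userspace, illustrative): installing a handler through
 * sigaction(2), which lands in do_sigaction() above. SIGQUIT is additionally
 * blocked while the handler runs via sa_mask. install_handler() and on_term()
 * are hypothetical names:
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_term;

static void on_term(int sig)
{
	got_term = 1;			/* async-signal-safe: just set a flag */
}

int install_handler(void)
{
	struct sigaction sa = { .sa_handler = on_term, .sa_flags = SA_RESTART };

	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGQUIT);	/* blocked during the handler */
	return sigaction(SIGTERM, &sa, NULL);
}
#endif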
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		int ss_flags;
		size_t ss_size;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
}

int restore_altstack(const stack_t __user *uss)
{
	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
	/* squash all but EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}
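/*
 * Usage sketch (userspace, illustrative): giving SIGSEGV its own stack with
 * sigaltstack(2) so a stack-overflow fault can still run a handler; requires
 * SA_ONSTACK on the action. setup_altstack() is a hypothetical helper:
 */
#if 0
#include <signal.h>
#include <stdlib.h>

int setup_altstack(void (*handler)(int))
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ,
		       .ss_flags = 0 };
	struct sigaction sa = { .sa_handler = handler, .sa_flags = SA_ONSTACK };

	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL))
		return -1;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
#endif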
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer());
	set_fs(seg);
	if (ret >= 0 && uoss_ptr) {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}
#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif
#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
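/*
 * Usage sketch (userspace, illustrative): because this entry point sets
 * SA_ONESHOT | SA_NOMASK, a classic signal(2) handler must re-arm itself,
 * which is inherently racy; sigaction(2) avoids this. once() is hypothetical:
 */
#if 0
#include <signal.h>

static void once(int sig)
{
	signal(sig, once);	/* re-arm: disposition was reset to SIG_DFL */
}
#endif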
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
/**
 *  sys_rt_sigsuspend - replace the signal mask with @unewset until a
 *	signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
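/*
 * Usage sketch (userspace, illustrative): sigsuspend(2) atomically installs
 * @unewset and sleeps, closing the window where a signal could arrive between
 * testing a flag and calling pause(). wait_usr1() is a hypothetical helper:
 */
#if 0
#include <signal.h>

extern volatile sig_atomic_t got_usr1;	/* set by a SIGUSR1 handler */

void wait_usr1(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);
	while (!got_usr1)
		sigsuspend(&old);	/* unblock + sleep atomically */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif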
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */