rk: revert to v3.10
diff --git a/kernel/signal.c b/kernel/signal.c
index 415d85d6f6c637b099826d012e46f70832b1d557..113411bfe8b1205ad0f26556776f2a317e06eb0c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
  */
 
 #include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
+#include <linux/coredump.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
 #include <linux/freezer.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
+#include <linux/user_namespace.h>
+#include <linux/uprobes.h>
+#include <linux/compat.h>
+#include <linux/cn_proc.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
 
@@ -35,6 +40,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/siginfo.h>
+#include <asm/cacheflush.h>
 #include "audit.h"     /* audit_signal_info() */
 
 /*
@@ -57,21 +63,20 @@ static int sig_handler_ignored(void __user *handler, int sig)
                (handler == SIG_DFL && sig_kernel_ignore(sig));
 }
 
-static int sig_task_ignored(struct task_struct *t, int sig,
-               int from_ancestor_ns)
+static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 {
        void __user *handler;
 
        handler = sig_handler(t, sig);
 
        if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
-                       handler == SIG_DFL && !from_ancestor_ns)
+                       handler == SIG_DFL && !force)
                return 1;
 
        return sig_handler_ignored(handler, sig);
 }
 
-static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
+static int sig_ignored(struct task_struct *t, int sig, bool force)
 {
        /*
         * Blocked signals are never ignored, since the
@@ -81,13 +86,13 @@ static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;
 
-       if (!sig_task_ignored(t, sig, from_ancestor_ns))
+       if (!sig_task_ignored(t, sig, force))
                return 0;
 
        /*
         * Tracers may want to know about even ignored signals.
         */
-       return !tracehook_consider_ignored_signal(t, sig);
+       return !t->ptrace;
 }
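
For orientation: the new force flag is computed by prepare_signal()'s callers. The unchanged caller in this file derives the cross-namespace case roughly as follows (a sketch of send_signal() as it reads in this tree, shown for context, not part of the hunk):

	static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
				int group)
	{
		int from_ancestor_ns = 0;

	#ifdef CONFIG_PID_NS
		/* user-sent signal crossing into a descendant pid namespace? */
		from_ancestor_ns = si_fromuser(info) &&
				   !task_pid_nr_ns(current, task_active_pid_ns(t));
	#endif

		return __send_signal(sig, info, t, group, from_ancestor_ns);
	}
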
 
 /*
@@ -124,7 +129,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 static int recalc_sigpending_tsk(struct task_struct *t)
 {
-       if ((t->group_stop & GROUP_STOP_PENDING) ||
+       if ((t->jobctl & JOBCTL_PENDING_MASK) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -150,9 +155,7 @@ void recalc_sigpending_and_wake(struct task_struct *t)
 
 void recalc_sigpending(void)
 {
-       if (unlikely(tracehook_force_sigpending()))
-               set_thread_flag(TIF_SIGPENDING);
-       else if (!recalc_sigpending_tsk(current) && !freezing(current))
+       if (!recalc_sigpending_tsk(current) && !freezing(current))
                clear_thread_flag(TIF_SIGPENDING);
 
 }
@@ -161,7 +164,7 @@ void recalc_sigpending(void)
 
 #define SYNCHRONOUS_MASK \
        (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
-        sigmask(SIGTRAP) | sigmask(SIGFPE))
+        sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
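
SIGSYS joins the synchronous mask so that fault-like seccomp traps are dequeued ahead of queued asynchronous signals. For reference, a sketch of the producer side (modeled on kernel/seccomp.c in this series; treat the exact syscall_get_arch() arguments as an assumption):

	static void seccomp_send_sigsys(int syscall, int reason)
	{
		struct siginfo info;

		memset(&info, 0, sizeof(info));
		info.si_signo = SIGSYS;
		info.si_code = SYS_SECCOMP;
		info.si_call_addr = (void __user *)KSTK_EIP(current);
		info.si_errno = reason;
		info.si_arch = syscall_get_arch(current, task_pt_regs(current));
		info.si_syscall = syscall;
		force_sig_info(SIGSYS, &info, current);
	}
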
 
 int next_signal(struct sigpending *pending, sigset_t *mask)
 {
@@ -224,47 +227,93 @@ static inline void print_dropped_signal(int sig)
 }
 
 /**
- * task_clear_group_stop_trapping - clear group stop trapping bit
+ * task_set_jobctl_pending - set jobctl pending bits
  * @task: target task
+ * @mask: pending bits to set
  *
- * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
- * and wake up the ptracer.  Note that we don't need any further locking.
- * @task->siglock guarantees that @task->parent points to the ptracer.
+ * Set @mask on @task->jobctl.  @mask must be a subset of
+ * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
+ * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
+ * cleared.  If @task is already being killed or exiting, this function
+ * becomes a noop.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ *
+ * RETURNS:
+ * %true if @mask is set, %false if made a noop because @task was dying.
+ */
+bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
+{
+       BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
+                       JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
+       BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
+
+       if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+               return false;
+
+       if (mask & JOBCTL_STOP_SIGMASK)
+               task->jobctl &= ~JOBCTL_STOP_SIGMASK;
+
+       task->jobctl |= mask;
+       return true;
+}
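
A minimal usage sketch (an assumption modeled on the ptrace attach path, not part of this hunk): jobctl bits are set under siglock, and the target is kicked so it notices the new trap:

	spin_lock_irq(&task->sighand->siglock);
	/* schedule a STOP trap; returns false if @task is already dying */
	if (task_set_jobctl_pending(task, JOBCTL_TRAP_STOP))
		signal_wake_up(task, 0);
	spin_unlock_irq(&task->sighand->siglock);
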
+
+/**
+ * task_clear_jobctl_trapping - clear jobctl trapping bit
+ * @task: target task
+ *
+ * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
+ * Clear it and wake up the ptracer.  Note that we don't need any further
+ * locking.  @task->siglock guarantees that @task->parent points to the
+ * ptracer.
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
  */
-static void task_clear_group_stop_trapping(struct task_struct *task)
+void task_clear_jobctl_trapping(struct task_struct *task)
 {
-       if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
-               task->group_stop &= ~GROUP_STOP_TRAPPING;
-               __wake_up_sync_key(&task->parent->signal->wait_chldexit,
-                                  TASK_UNINTERRUPTIBLE, 1, task);
+       if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
+               task->jobctl &= ~JOBCTL_TRAPPING;
+               wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
        }
 }
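
The switch from __wake_up_sync_key() to wake_up_bit() implies the ptracer now sleeps on the bit itself; a sketch of the waiting side (modeled on ptrace_attach(); ptrace_trapping_sleep_fn and the four-argument wait_on_bit() are assumptions about this series' API):

	wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
		    ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
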
 
 /**
- * task_clear_group_stop_pending - clear pending group stop
+ * task_clear_jobctl_pending - clear jobctl pending bits
  * @task: target task
+ * @mask: pending bits to clear
+ *
+ * Clear @mask from @task->jobctl.  @mask must be a subset of
+ * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
+ * other STOP bits are cleared together.
  *
- * Clear group stop states for @task.
+ * If clearing of @mask leaves no stop or trap pending, this function calls
+ * task_clear_jobctl_trapping().
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
  */
-void task_clear_group_stop_pending(struct task_struct *task)
+void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
 {
-       task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
-                             GROUP_STOP_DEQUEUED);
+       BUG_ON(mask & ~JOBCTL_PENDING_MASK);
+
+       if (mask & JOBCTL_STOP_PENDING)
+               mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
+
+       task->jobctl &= ~mask;
+
+       if (!(task->jobctl & JOBCTL_PENDING_MASK))
+               task_clear_jobctl_trapping(task);
 }
 
 /**
  * task_participate_group_stop - participate in a group stop
  * @task: task participating in a group stop
  *
- * @task has GROUP_STOP_PENDING set and is participating in a group stop.
+ * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
  * Group stop states are cleared and the group stop count is consumed if
- * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
+ * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
  * stop, the appropriate %SIGNAL_* flags are set.
  *
  * CONTEXT:
@@ -277,11 +326,11 @@ void task_clear_group_stop_pending(struct task_struct *task)
 static bool task_participate_group_stop(struct task_struct *task)
 {
        struct signal_struct *sig = task->signal;
-       bool consume = task->group_stop & GROUP_STOP_CONSUME;
+       bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
 
-       WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
+       WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
 
-       task_clear_group_stop_pending(task);
+       task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
 
        if (!consume)
                return false;
@@ -437,6 +486,9 @@ flush_signal_handlers(struct task_struct *t, int force_default)
                if (force_default || ka->sa.sa_handler != SIG_IGN)
                        ka->sa.sa_handler = SIG_DFL;
                ka->sa.sa_flags = 0;
+#ifdef __ARCH_HAS_SA_RESTORER
+               ka->sa.sa_restorer = NULL;
+#endif
                sigemptyset(&ka->sa.sa_mask);
                ka++;
        }
@@ -449,7 +501,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
                return 1;
        if (handler != SIG_IGN && handler != SIG_DFL)
                return 0;
-       return !tracehook_consider_fatal_signal(tsk, sig);
+       /* if ptraced, let the tracer determine */
+       return !tsk->ptrace;
 }
 
 /*
@@ -604,7 +657,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
-               current->group_stop |= GROUP_STOP_DEQUEUED;
+               current->jobctl |= JOBCTL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
@@ -631,23 +684,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up(struct task_struct *t, int resume)
+void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
-       unsigned int mask;
-
        set_tsk_thread_flag(t, TIF_SIGPENDING);
-
        /*
-        * For SIGKILL, we want to wake it up in the stopped/traced/killable
+        * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
-       mask = TASK_INTERRUPTIBLE;
-       if (resume)
-               mask |= TASK_WAKEKILL;
-       if (!wake_up_state(t, mask))
+       if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                kick_process(t);
 }
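
The old resume parameter survives as header-side wrappers that pick the extra wake state; they are expected to look roughly like this (a sketch of include/linux/sched.h in this series):

	static inline void signal_wake_up(struct task_struct *t, bool resume)
	{
		signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
	}

	static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
	{
		signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
	}
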
 
@@ -721,14 +768,13 @@ static int kill_ok_by_cred(struct task_struct *t)
        const struct cred *cred = current_cred();
        const struct cred *tcred = __task_cred(t);
 
-       if (cred->user->user_ns == tcred->user->user_ns &&
-           (cred->euid == tcred->suid ||
-            cred->euid == tcred->uid ||
-            cred->uid  == tcred->suid ||
-            cred->uid  == tcred->uid))
+       if (uid_eq(cred->euid, tcred->suid) ||
+           uid_eq(cred->euid, tcred->uid)  ||
+           uid_eq(cred->uid,  tcred->suid) ||
+           uid_eq(cred->uid,  tcred->uid))
                return 1;
 
-       if (ns_capable(tcred->user->user_ns, CAP_KILL))
+       if (ns_capable(tcred->user_ns, CAP_KILL))
                return 1;
 
        return 0;
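
The raw == comparisons become uid_eq() because uids are now kuid_t values that cannot be compared directly; a sketch of the helper being relied on (include/linux/uidgid.h, assuming strict uid type checking is enabled):

	typedef struct {
		uid_t val;
	} kuid_t;

	static inline bool uid_eq(kuid_t left, kuid_t right)
	{
		return left.val == right.val;
	}
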
@@ -773,6 +819,32 @@ static int check_kill_permission(int sig, struct siginfo *info,
        return security_task_kill(t, info, sig, 0);
 }
 
+/**
+ * ptrace_trap_notify - schedule trap to notify ptracer
+ * @t: tracee wanting to notify tracer
+ *
+ * This function schedules a sticky ptrace trap, which is cleared on the
+ * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
+ * seized by the ptracer.
+ *
+ * If @t is running, a STOP trap will be taken.  If @t is trapped for STOP
+ * and the ptracer is listening for events, the tracee is woken up so that
+ * it can re-trap for the new event.  If trapped otherwise, the STOP trap
+ * will eventually be taken without returning to userland after the
+ * existing traps are finished by PTRACE_CONT.
+ *
+ * CONTEXT:
+ * Must be called with @t->sighand->siglock held.
+ */
+static void ptrace_trap_notify(struct task_struct *t)
+{
+       WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
+       assert_spin_locked(&t->sighand->siglock);
+
+       task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+       ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+}
+
 /*
  * Handle magic process-wide effects of stop/continue signals. Unlike
  * the signal actions, these happen immediately at signal-generation
@@ -783,12 +855,14 @@ static int check_kill_permission(int sig, struct siginfo *info,
  * Returns true if the signal should be actually delivered, otherwise
  * it should be dropped.
  */
-static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
+static bool prepare_signal(int sig, struct task_struct *p, bool force)
 {
        struct signal_struct *signal = p->signal;
        struct task_struct *t;
 
-       if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
+       if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
+               if (signal->flags & SIGNAL_GROUP_COREDUMP)
+                       return sig == SIGKILL;
                /*
                 * The process is in the middle of dying, nothing to do.
                 */
@@ -809,9 +883,12 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
                rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
-                       task_clear_group_stop_pending(t);
+                       task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-                       wake_up_state(t, __TASK_STOPPED);
+                       if (likely(!(t->ptrace & PT_SEIZED)))
+                               wake_up_state(t, __TASK_STOPPED);
+                       else
+                               ptrace_trap_notify(t);
                } while_each_thread(p, t);
 
                /*
@@ -840,7 +917,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
                }
        }
 
-       return !sig_ignored(p, sig, from_ancestor_ns);
+       return !sig_ignored(p, sig, force);
 }
 
 /*
@@ -908,8 +985,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
        if (sig_fatal(p, sig) &&
            !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
            !sigismember(&t->real_blocked, sig) &&
-           (sig == SIGKILL ||
-            !tracehook_consider_fatal_signal(t, sig))) {
+           (sig == SIGKILL || !t->ptrace)) {
                /*
                 * This signal will be fatal to the whole group.
                 */
@@ -925,7 +1001,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
                        signal->group_stop_count = 0;
                        t = p;
                        do {
-                               task_clear_group_stop_pending(t);
+                               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
@@ -946,19 +1022,41 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
        return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
 
+#ifdef CONFIG_USER_NS
+static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+{
+       if (current_user_ns() == task_cred_xxx(t, user_ns))
+               return;
+
+       if (SI_FROMKERNEL(info))
+               return;
+
+       rcu_read_lock();
+       info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
+                                       make_kuid(current_user_ns(), info->si_uid));
+       rcu_read_unlock();
+}
+#else
+static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
+{
+       return;
+}
+#endif
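
The munged conversion translates si_uid into the receiver's user namespace; if the sender's uid has no mapping there, from_kuid_munged() degrades to the overflow uid rather than leaking a raw value. A sketch of the effect (illustrative, assuming the default overflowuid of 65534):

	kuid_t kuid = make_kuid(current_user_ns(), info->si_uid);

	/* 65534 here if kuid is unmapped in the target namespace */
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns), kuid);
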
+
 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        int group, int from_ancestor_ns)
 {
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;
-
-       trace_signal_generate(sig, info, t);
+       int ret = 0, result;
 
        assert_spin_locked(&t->sighand->siglock);
 
-       if (!prepare_signal(sig, t, from_ancestor_ns))
-               return 0;
+       result = TRACE_SIGNAL_IGNORED;
+       if (!prepare_signal(sig, t,
+                       from_ancestor_ns || (info == SEND_SIG_FORCED)))
+               goto ret;
 
        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
@@ -966,8 +1064,11 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
+       result = TRACE_SIGNAL_ALREADY_PENDING;
        if (legacy_queue(pending, sig))
-               return 0;
+               goto ret;
+
+       result = TRACE_SIGNAL_DELIVERED;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
@@ -1000,7 +1101,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                        q->info.si_code = SI_USER;
                        q->info.si_pid = task_tgid_nr_ns(current,
                                                        task_active_pid_ns(t));
-                       q->info.si_uid = current_uid();
+                       q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
                        break;
                case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
@@ -1015,6 +1116,9 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                                q->info.si_pid = 0;
                        break;
                }
+
+               userns_fixup_signal_uid(&q->info, t);
+
        } else if (!is_si_special(info)) {
                if (sig >= SIGRTMIN && info->si_code != SI_USER) {
                        /*
@@ -1022,14 +1126,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
-                       trace_signal_overflow_fail(sig, group, info);
-                       return -EAGAIN;
+                       result = TRACE_SIGNAL_OVERFLOW_FAIL;
+                       ret = -EAGAIN;
+                       goto ret;
                } else {
                        /*
                         * This is a silent loss of information.  We still
                         * send the signal, but the *info bits are lost.
                         */
-                       trace_signal_lose_info(sig, group, info);
+                       result = TRACE_SIGNAL_LOSE_INFO;
                }
        }
 
@@ -1037,7 +1142,9 @@ out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
-       return 0;
+ret:
+       trace_signal_generate(sig, info, t, group, result);
+       return ret;
 }
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
@@ -1053,13 +1160,13 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
        return __send_signal(sig, info, t, group, from_ancestor_ns);
 }
 
-static void print_fatal_signal(struct pt_regs *regs, int signr)
+static void print_fatal_signal(int signr)
 {
-       printk("%s/%d: potentially unexpected fatal signal %d.\n",
-               current->comm, task_pid_nr(current), signr);
+       struct pt_regs *regs = signal_pt_regs();
+       printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
 
 #if defined(__i386__) && !defined(__arch_um__)
-       printk("code at %08lx: ", regs->ip);
+       printk(KERN_INFO "code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
@@ -1067,11 +1174,11 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
 
                        if (get_user(insn, (unsigned char *)(regs->ip + i)))
                                break;
-                       printk("%02x ", insn);
+                       printk(KERN_CONT "%02x ", insn);
                }
        }
+       printk(KERN_CONT "\n");
 #endif
-       printk("\n");
        preempt_disable();
        show_regs(regs);
        preempt_enable();
@@ -1160,7 +1267,7 @@ int zap_other_threads(struct task_struct *p)
        p->signal->group_stop_count = 0;
 
        while_each_thread(p, t) {
-               task_clear_group_stop_pending(t);
+               task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                count++;
 
                /* Don't bother with already dead threads */
@@ -1271,13 +1378,22 @@ int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
        return error;
 }
 
+static int kill_as_cred_perm(const struct cred *cred,
+                            struct task_struct *target)
+{
+       const struct cred *pcred = __task_cred(target);
+       if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
+           !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
+               return 0;
+       return 1;
+}
+
 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
-int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
-                     uid_t uid, uid_t euid, u32 secid)
+int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
+                        const struct cred *cred, u32 secid)
 {
        int ret = -EINVAL;
        struct task_struct *p;
-       const struct cred *pcred;
        unsigned long flags;
 
        if (!valid_signal(sig))
@@ -1289,10 +1405,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
                ret = -ESRCH;
                goto out_unlock;
        }
-       pcred = __task_cred(p);
-       if (si_fromuser(info) &&
-           euid != pcred->suid && euid != pcred->uid &&
-           uid  != pcred->suid && uid  != pcred->uid) {
+       if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
                ret = -EPERM;
                goto out_unlock;
        }
@@ -1311,7 +1424,7 @@ out_unlock:
        rcu_read_unlock();
        return ret;
 }
-EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
+EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1472,7 +1585,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
-       int ret;
+       int ret, result;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1481,7 +1594,8 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
                goto ret;
 
        ret = 1; /* the signal is ignored */
-       if (!prepare_signal(sig, t, 0))
+       result = TRACE_SIGNAL_IGNORED;
+       if (!prepare_signal(sig, t, false))
                goto out;
 
        ret = 0;
@@ -1492,6 +1606,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
+               result = TRACE_SIGNAL_ALREADY_PENDING;
                goto out;
        }
        q->info.si_overrun = 0;
@@ -1501,7 +1616,9 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
+       result = TRACE_SIGNAL_DELIVERED;
 out:
+       trace_signal_generate(sig, &q->info, t, group, result);
        unlock_task_sighand(t, &flags);
 ret:
        return ret;
@@ -1511,47 +1628,56 @@ ret:
  * Let a parent know about the death of a child.
  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
  *
- * Returns -1 if our parent ignored us and so we've switched to
- * self-reaping, or else @sig.
+ * Returns true if our parent ignored us and so we've switched to
+ * self-reaping.
  */
-int do_notify_parent(struct task_struct *tsk, int sig)
+bool do_notify_parent(struct task_struct *tsk, int sig)
 {
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
-       int ret = sig;
+       bool autoreap = false;
+       cputime_t utime, stime;
 
        BUG_ON(sig == -1);
 
        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(task_is_stopped_or_traced(tsk));
 
-       BUG_ON(!task_ptrace(tsk) &&
+       BUG_ON(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
 
+       if (sig != SIGCHLD) {
+               /*
+                * This is only possible if parent == real_parent.
+                * Check if it has changed security domain.
+                */
+               if (tsk->parent_exec_id != tsk->parent->self_exec_id)
+                       sig = SIGCHLD;
+       }
+
        info.si_signo = sig;
        info.si_errno = 0;
        /*
-        * we are under tasklist_lock here so our parent is tied to
-        * us and cannot exit and release its namespace.
+        * We are under tasklist_lock here so our parent is tied to
+        * us and cannot change.
         *
-        * the only it can is to switch its nsproxy with sys_unshare,
-        * bu uncharing pid namespaces is not allowed, so we'll always
-        * see relevant namespace
+        * task_active_pid_ns will always return the same pid namespace
+        * until a task passes through release_task.
         *
         * write_lock() currently calls preempt_disable() which is the
         * same as rcu_read_lock(), but according to Oleg, this is not
         * correct to rely on this
         */
        rcu_read_lock();
-       info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
-       info.si_uid = __task_cred(tsk)->uid;
+       info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
+       info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
+                                      task_uid(tsk));
        rcu_read_unlock();
 
-       info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
-                               tsk->signal->utime));
-       info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
-                               tsk->signal->stime));
+       task_cputime(tsk, &utime, &stime);
+       info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
+       info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
@@ -1565,7 +1691,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 
        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
-       if (!task_ptrace(tsk) && sig == SIGCHLD &&
+       if (!tsk->ptrace && sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
@@ -1583,16 +1709,16 @@ int do_notify_parent(struct task_struct *tsk, int sig)
                 * is implementation-defined: we do (if you don't want
                 * it, just use SIG_IGN instead).
                 */
-               ret = tsk->exit_signal = -1;
+               autoreap = true;
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
-                       sig = -1;
+                       sig = 0;
        }
-       if (valid_signal(sig) && sig > 0)
+       if (valid_signal(sig) && sig)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
 
-       return ret;
+       return autoreap;
 }
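
The bool return replaces the old -1/sig convention; the exit path consumes it roughly like this (a simplified sketch of the matching kernel/exit.c logic in this series; the ptraced-case signal selection is condensed):

	if (unlikely(tsk->ptrace)) {
		autoreap = do_notify_parent(tsk, SIGCHLD);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			   do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
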
 
 /**
@@ -1615,6 +1741,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;
+       cputime_t utime, stime;
 
        if (for_ptracer) {
                parent = tsk->parent;
@@ -1629,12 +1756,13 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
         * see comment in do_notify_parent() about the following 4 lines
         */
        rcu_read_lock();
-       info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
-       info.si_uid = __task_cred(tsk)->uid;
+       info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
+       info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
        rcu_read_unlock();
 
-       info.si_utime = cputime_to_clock_t(tsk->utime);
-       info.si_stime = cputime_to_clock_t(tsk->stime);
+       task_cputime(tsk, &utime, &stime);
+       info.si_utime = cputime_to_clock_t(utime);
+       info.si_stime = cputime_to_clock_t(stime);
 
        info.si_code = why;
        switch (why) {
@@ -1665,7 +1793,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
 
 static inline int may_ptrace_stop(void)
 {
-       if (!likely(task_ptrace(current)))
+       if (!likely(current->ptrace))
                return 0;
        /*
         * Are we in the middle of do_coredump?
@@ -1675,6 +1803,10 @@ static inline int may_ptrace_stop(void)
         * If SIGKILL was already sent before the caller unlocked
         * ->siglock we must see ->core_state != NULL. Otherwise it
         * is safe to enter schedule().
+        *
+        * This is almost outdated, a task with the pending SIGKILL can't
+        * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
+        * after SIGKILL was already dequeued.
         */
        if (unlikely(current->mm->core_state) &&
            unlikely(current->mm == current->parent->mm))
@@ -1693,15 +1825,6 @@ static int sigkill_pending(struct task_struct *tsk)
                sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
 }
 
-/*
- * Test whether the target task of the usual cldstop notification - the
- * real_parent of @child - is in the same group as the ptracer.
- */
-static bool real_parent_is_ptracer(struct task_struct *child)
-{
-       return same_thread_group(child->parent, child->real_parent);
-}
-
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1739,31 +1862,34 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
        }
 
        /*
-        * If @why is CLD_STOPPED, we're trapping to participate in a group
-        * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
-        * while siglock was released for the arch hook, PENDING could be
-        * clear now.  We act as if SIGCONT is received after TASK_TRACED
-        * is entered - ignore it.
+        * We're committing to trapping.  TRACED should be visible before
+        * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
+        * Also, transition to TRACED and updates to ->jobctl should be
+        * atomic with respect to siglock and should be done after the arch
+        * hook as siglock is released and regrabbed across it.
         */
-       if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
-               gstop_done = task_participate_group_stop(current);
+       set_current_state(TASK_TRACED);
 
        current->last_siginfo = info;
        current->exit_code = exit_code;
 
        /*
-        * TRACED should be visible before TRAPPING is cleared; otherwise,
-        * the tracer might fail do_wait().
+        * If @why is CLD_STOPPED, we're trapping to participate in a group
+        * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
+        * across siglock relocks since INTERRUPT was scheduled, PENDING
+        * could be clear now.  We act as if SIGCONT is received after
+        * TASK_TRACED is entered - ignore it.
         */
-       set_current_state(TASK_TRACED);
+       if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
+               gstop_done = task_participate_group_stop(current);
 
-       /*
-        * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
-        * transition to TASK_TRACED should be atomic with respect to
-        * siglock.  This hsould be done after the arch hook as siglock is
-        * released and regrabbed across it.
-        */
-       task_clear_group_stop_trapping(current);
+       /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
+       task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
+       if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
+               task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
+
+       /* entering a trap, clear TRAPPING */
+       task_clear_jobctl_trapping(current);
 
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
@@ -1779,7 +1905,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
                 * separately unless they're gonna be duplicates.
                 */
                do_notify_parent_cldstop(current, true, why);
-               if (gstop_done && !real_parent_is_ptracer(current))
+               if (gstop_done && ptrace_reparented(current))
                        do_notify_parent_cldstop(current, false, why);
 
                /*
@@ -1791,7 +1917,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
                preempt_disable();
                read_unlock(&tasklist_lock);
                preempt_enable_no_resched();
-               schedule();
+               freezable_schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
@@ -1799,26 +1925,20 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
                 *
                 * If @gstop_done, the ptracer went away between group stop
                 * completion and here.  During detach, it would have set
-                * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
-                * in do_signal_stop() on return, so notifying the real
-                * parent of the group stop completion is enough.
+                * JOBCTL_STOP_PENDING on us and we'll re-enter
+                * TASK_STOPPED in do_signal_stop() on return, so notifying
+                * the real parent of the group stop completion is enough.
                 */
                if (gstop_done)
                        do_notify_parent_cldstop(current, false, why);
 
+               /* tasklist protects us from ptrace_freeze_traced() */
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
                read_unlock(&tasklist_lock);
        }
 
-       /*
-        * While in TASK_TRACED, we were considered "frozen enough".
-        * Now that we woke up, it's crucial if we're supposed to be
-        * frozen that we freeze now before running anything substantial.
-        */
-       try_to_freeze();
-
        /*
         * We are back.  Now reacquire the siglock before touching
         * last_siginfo, so that we are sure to have synchronized with
@@ -1827,6 +1947,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
        spin_lock_irq(&current->sighand->siglock);
        current->last_siginfo = NULL;
 
+       /* LISTENING can be set only during STOP traps, clear it */
+       current->jobctl &= ~JOBCTL_LISTENING;
+
        /*
         * Queued signals ignored us while we were stopped for tracing.
         * So check for any that we should take before resuming user mode.
@@ -1835,44 +1958,68 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
        recalc_sigpending_tsk(current);
 }
 
-void ptrace_notify(int exit_code)
+static void ptrace_do_notify(int signr, int exit_code, int why)
 {
        siginfo_t info;
 
-       BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
-
        memset(&info, 0, sizeof info);
-       info.si_signo = SIGTRAP;
+       info.si_signo = signr;
        info.si_code = exit_code;
        info.si_pid = task_pid_vnr(current);
-       info.si_uid = current_uid();
+       info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
 
        /* Let the debugger run.  */
+       ptrace_stop(exit_code, why, 1, &info);
+}
+
+void ptrace_notify(int exit_code)
+{
+       BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
+       if (unlikely(current->task_works))
+               task_work_run();
+
        spin_lock_irq(&current->sighand->siglock);
-       ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
+       ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
        spin_unlock_irq(&current->sighand->siglock);
 }
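
Callers encode the ptrace event in the upper bits of exit_code; a sketch of the usual entry point (modeled on ptrace_event() in include/linux/ptrace.h; the legacy non-SEIZE fallback is omitted):

	static inline void ptrace_event(int event, unsigned long message)
	{
		if (unlikely(ptrace_event_enabled(current, event))) {
			current->ptrace_message = message;
			ptrace_notify((event << 8) | SIGTRAP);
		}
	}
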
 
-/*
- * This performs the stopping for SIGSTOP and other stop signals.
- * We have to stop all threads in the thread group.
- * Returns non-zero if we've actually stopped and released the siglock.
- * Returns zero if we didn't stop and still hold the siglock.
+/**
+ * do_signal_stop - handle group stop for SIGSTOP and other stop signals
+ * @signr: signr causing group stop if initiating
+ *
+ * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
+ * and participate in it.  If already set, participate in the existing
+ * group stop.  If participated in a group stop (and thus slept), %true is
+ * returned with siglock released.
+ *
+ * If ptraced, this function doesn't handle stop itself.  Instead,
+ * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
+ * untouched.  The caller must ensure that INTERRUPT trap handling takes
+ * place afterwards.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which is released
+ * on %true return.
+ *
+ * RETURNS:
+ * %false if group stop is already cancelled or ptrace trap is scheduled.
+ * %true if participated in group stop.
  */
-static int do_signal_stop(int signr)
+static bool do_signal_stop(int signr)
+       __releases(&current->sighand->siglock)
 {
        struct signal_struct *sig = current->signal;
 
-       if (!(current->group_stop & GROUP_STOP_PENDING)) {
-               unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
+       if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
+               unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
                struct task_struct *t;
 
-               /* signr will be recorded in task->group_stop for retries */
-               WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
+               /* signr will be recorded in task->jobctl for retries */
+               WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
 
-               if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
+               if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
-                       return 0;
+                       return false;
                /*
                 * There is no group stop already in progress.  We must
                 * initiate one now.
@@ -1894,29 +2041,31 @@ static int do_signal_stop(int signr)
                 */
                if (!(sig->flags & SIGNAL_STOP_STOPPED))
                        sig->group_exit_code = signr;
-               else
-                       WARN_ON_ONCE(!task_ptrace(current));
 
-               current->group_stop &= ~GROUP_STOP_SIGMASK;
-               current->group_stop |= signr | gstop;
-               sig->group_stop_count = 1;
+               sig->group_stop_count = 0;
+
+               if (task_set_jobctl_pending(current, signr | gstop))
+                       sig->group_stop_count++;
+
                for (t = next_thread(current); t != current;
                     t = next_thread(t)) {
-                       t->group_stop &= ~GROUP_STOP_SIGMASK;
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
-                       if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
-                               t->group_stop |= signr | gstop;
+                       if (!task_is_stopped(t) &&
+                           task_set_jobctl_pending(t, signr | gstop)) {
                                sig->group_stop_count++;
-                               signal_wake_up(t, 0);
+                               if (likely(!(t->ptrace & PT_SEIZED)))
+                                       signal_wake_up(t, 0);
+                               else
+                                       ptrace_trap_notify(t);
                        }
                }
        }
-retry:
-       if (likely(!task_ptrace(current))) {
+
+       if (likely(!current->ptrace)) {
                int notify = 0;
 
                /*
@@ -1946,44 +2095,65 @@ retry:
                }
 
                /* Now we don't run again until woken by SIGCONT or SIGKILL */
-               schedule();
-
-               spin_lock_irq(&current->sighand->siglock);
+               freezable_schedule();
+               return true;
        } else {
-               ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
-                           CLD_STOPPED, 0, NULL);
-               current->exit_code = 0;
+               /*
+                * While ptraced, group stop is handled by STOP trap.
+                * Schedule it and let the caller deal with it.
+                */
+               task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
+               return false;
        }
+}
 
-       /*
-        * GROUP_STOP_PENDING could be set if another group stop has
-        * started since being woken up or ptrace wants us to transit
-        * between TASK_STOPPED and TRACED.  Retry group stop.
-        */
-       if (current->group_stop & GROUP_STOP_PENDING) {
-               WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
-               goto retry;
+/**
+ * do_jobctl_trap - take care of ptrace jobctl traps
+ *
+ * When PT_SEIZED, it's used for both group stop and explicit
+ * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
+ * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
+ * the stop signal; otherwise, %SIGTRAP.
+ *
+ * When !PT_SEIZED, it's used only for group stop trap with stop signal
+ * number as exit_code and no siginfo.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which may be
+ * released and re-acquired before returning with intervening sleep.
+ */
+static void do_jobctl_trap(void)
+{
+       struct signal_struct *signal = current->signal;
+       int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
+
+       if (current->ptrace & PT_SEIZED) {
+               if (!signal->group_stop_count &&
+                   !(signal->flags & SIGNAL_STOP_STOPPED))
+                       signr = SIGTRAP;
+               WARN_ON_ONCE(!signr);
+               ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
+                                CLD_STOPPED);
+       } else {
+               WARN_ON_ONCE(!signr);
+               ptrace_stop(signr, CLD_STOPPED, 0, NULL);
+               current->exit_code = 0;
        }
-
-       /* PTRACE_ATTACH might have raced with task killing, clear trapping */
-       task_clear_group_stop_trapping(current);
-
-       spin_unlock_irq(&current->sighand->siglock);
-
-       tracehook_finish_jctl();
-
-       return 1;
 }
 
-static int ptrace_signal(int signr, siginfo_t *info,
-                        struct pt_regs *regs, void *cookie)
+static int ptrace_signal(int signr, siginfo_t *info)
 {
-       if (!task_ptrace(current))
-               return signr;
-
-       ptrace_signal_deliver(regs, cookie);
-
-       /* Let the debugger run.  */
+       ptrace_signal_deliver();
+       /*
+        * We do not check sig_kernel_stop(signr) but set this marker
+        * unconditionally because we do not know whether debugger will
+        * change signr. This flag has no meaning unless we are going
+        * to stop after return from ptrace_stop(). In this case it will
+        * be checked in do_signal_stop(), we should only stop if it was
+        * not cleared by SIGCONT while we were sleeping. See also the
+        * comment in dequeue_signal().
+        */
+       current->jobctl |= JOBCTL_STOP_DEQUEUED;
        ptrace_stop(signr, CLD_TRAPPED, 0, info);
 
        /* We're back.  Did the debugger cancel the sig?  */
@@ -2003,8 +2173,11 @@ static int ptrace_signal(int signr, siginfo_t *info,
                info->si_signo = signr;
                info->si_errno = 0;
                info->si_code = SI_USER;
+               rcu_read_lock();
                info->si_pid = task_pid_vnr(current->parent);
-               info->si_uid = task_uid(current->parent);
+               info->si_uid = from_kuid_munged(current_user_ns(),
+                                               task_uid(current->parent));
+               rcu_read_unlock();
        }
 
        /* If the (new) signal is now blocked, requeue it.  */
@@ -2023,15 +2196,20 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
        struct signal_struct *signal = current->signal;
        int signr;
 
-relock:
+       if (unlikely(current->task_works))
+               task_work_run();
+
+       if (unlikely(uprobe_deny_signal()))
+               return 0;
+
        /*
-        * We'll jump back here after any time we were stopped in TASK_STOPPED.
-        * While in TASK_STOPPED, we were considered "frozen enough".
-        * Now that we woke up, it's crucial if we're supposed to be
-        * frozen that we freeze now before running anything substantial.
+        * Do this once, we can't return to user-mode if freezing() == T.
+        * do_signal_stop() and ptrace_stop() do freezable_schedule() and
+        * thus do not need another check after return.
         */
        try_to_freeze();
 
+relock:
        spin_lock_irq(&sighand->siglock);
        /*
         * Every stopped thread goes here after wakeup. Check to see if
@@ -2039,7 +2217,6 @@ relock:
         * the CLD_ si_code into SIGNAL_CLD_MASK bits.
         */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
-               struct task_struct *leader;
                int why;
 
                if (signal->flags & SIGNAL_CLD_CONTINUED)
@@ -2060,13 +2237,11 @@ relock:
                 * a duplicate.
                 */
                read_lock(&tasklist_lock);
-
                do_notify_parent_cldstop(current, false, why);
 
-               leader = current->group_leader;
-               if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
-                       do_notify_parent_cldstop(leader, true, why);
-
+               if (ptrace_reparented(current->group_leader))
+                       do_notify_parent_cldstop(current->group_leader,
+                                               true, why);
                read_unlock(&tasklist_lock);
 
                goto relock;
@@ -2074,37 +2249,30 @@ relock:
 
        for (;;) {
                struct k_sigaction *ka;
-               /*
-                * Tracing can induce an artificial signal and choose sigaction.
-                * The return value in @signr determines the default action,
-                * but @info->si_signo is the signal number we will report.
-                */
-               signr = tracehook_get_signal(current, regs, info, return_ka);
-               if (unlikely(signr < 0))
+
+               if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
+                   do_signal_stop(0))
                        goto relock;
-               if (unlikely(signr != 0))
-                       ka = return_ka;
-               else {
-                       if (unlikely(current->group_stop &
-                                    GROUP_STOP_PENDING) && do_signal_stop(0))
-                               goto relock;
 
-                       signr = dequeue_signal(current, &current->blocked,
-                                              info);
+               if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
+                       do_jobctl_trap();
+                       spin_unlock_irq(&sighand->siglock);
+                       goto relock;
+               }
 
-                       if (!signr)
-                               break; /* will return 0 */
+               signr = dequeue_signal(current, &current->blocked, info);
 
-                       if (signr != SIGKILL) {
-                               signr = ptrace_signal(signr, info,
-                                                     regs, cookie);
-                               if (!signr)
-                                       continue;
-                       }
+               if (!signr)
+                       break; /* will return 0 */
 
-                       ka = &sighand->action[signr-1];
+               if (unlikely(current->ptrace) && signr != SIGKILL) {
+                       signr = ptrace_signal(signr, info);
+                       if (!signr)
+                               continue;
                }
 
+               ka = &sighand->action[signr-1];
+
                /* Trace actually delivered signals. */
                trace_signal_deliver(signr, info, ka);
 
@@ -2183,7 +2351,8 @@ relock:
 
                if (sig_kernel_coredump(signr)) {
                        if (print_fatal_signals)
-                               print_fatal_signal(regs, info->si_signo);
+                               print_fatal_signal(info->si_signo);
+                       proc_coredump_connector(current);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
@@ -2192,7 +2361,7 @@ relock:
                         * first and our do_group_exit call below will use
                         * that value and ignore the one we pass it.
                         */
-                       do_coredump(info->si_signo, info->si_signo, regs);
+                       do_coredump(info);
                }
 
                /*
@@ -2205,6 +2374,46 @@ relock:
        return signr;
 }
 
+/**
+ * signal_delivered - finish bookkeeping after a signal has been delivered
+ * @sig:               number of signal being delivered
+ * @info:              siginfo_t of signal being delivered
+ * @ka:                        sigaction setting that chose the handler
+ * @regs:              user register state
+ * @stepping:          nonzero if debugger single-step or block-step in use
+ *
+ * This function should be called when a signal has successfully been
+ * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
+ * is always blocked, and the signal itself is blocked unless %SA_NODEFER
+ * is set in @ka->sa.sa_flags).  Tracing is notified.
+ */
+void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
+                       struct pt_regs *regs, int stepping)
+{
+       sigset_t blocked;
+
+       /*
+        * A signal was successfully delivered, and the saved sigmask
+        * was stored on the signal frame, and will be restored by
+        * sigreturn.  So we can simply clear the restore sigmask flag.
+        */
+       clear_restore_sigmask();
+
+       sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
+       if (!(ka->sa.sa_flags & SA_NODEFER))
+               sigaddset(&blocked, sig);
+       set_current_blocked(&blocked);
+       tracehook_signal_handler(sig, info, ka, regs, stepping);
+}
+
+void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
+{
+       if (failed)
+               force_sigsegv(ksig->sig, current);
+       else
+               signal_delivered(ksig->sig, &ksig->info, &ksig->ka,
+                       signal_pt_regs(), stepping);
+}
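
A sketch of the arch-side call site (an assumption: setup_rt_frame() stands in for the architecture's frame-builder, and TIF_SINGLESTEP for its step flag):

	static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
	{
		int failed;

		/* arch-specific: push the signal frame onto the user stack */
		failed = setup_rt_frame(ksig, regs);

		signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
	}
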
+
 /*
  * It could be that complete_signal() picked us to notify about the
  * group-wide signal. Other threads should be notified now to take
@@ -2242,8 +2451,15 @@ void exit_signals(struct task_struct *tsk)
        int group_stop = 0;
        sigset_t unblocked;
 
+       /*
+        * @tsk is about to have PF_EXITING set - lock out users which
+        * expect stable threadgroup.
+        */
+       threadgroup_change_begin(tsk);
+
        if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
                tsk->flags |= PF_EXITING;
+               threadgroup_change_end(tsk);
                return;
        }
 
@@ -2253,6 +2469,9 @@ void exit_signals(struct task_struct *tsk)
         * see wants_signal(), do_signal_stop().
         */
        tsk->flags |= PF_EXITING;
+
+       threadgroup_change_end(tsk);
+
        if (!signal_pending(tsk))
                goto out;
 
@@ -2260,7 +2479,7 @@ void exit_signals(struct task_struct *tsk)
        signotset(&unblocked);
        retarget_shared_pending(tsk, &unblocked);
 
-       if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
+       if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
            task_participate_group_stop(tsk))
                group_stop = CLD_STOPPED;
 out:
@@ -2325,7 +2544,13 @@ static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
  * It is wrong to change ->blocked directly, this helper should be used
  * to ensure the process can't miss a shared signal we are going to block.
  */
-void set_current_blocked(const sigset_t *newset)
+void set_current_blocked(sigset_t *newset)
+{
+       sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
+       __set_current_blocked(newset);
+}
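
In-kernel callers whose set may carry SIGKILL/SIGSTOP use this filtering entry point; a minimal sketch (illustrative, @mask is a hypothetical caller-supplied set):

	sigset_t blocked;

	sigorsets(&blocked, &current->blocked, &mask);
	set_current_blocked(&blocked);	/* SIGKILL/SIGSTOP silently dropped */
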
+
+void __set_current_blocked(const sigset_t *newset)
 {
        struct task_struct *tsk = current;
 
@@ -2365,7 +2590,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
                return -EINVAL;
        }
 
-       set_current_blocked(&newset);
+       __set_current_blocked(&newset);
        return 0;
 }
 
@@ -2406,40 +2631,95 @@ SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
        return 0;
 }
 
-long do_sigpending(void __user *set, unsigned long sigsetsize)
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
+               compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
 {
-       long error = -EINVAL;
-       sigset_t pending;
+#ifdef __BIG_ENDIAN
+       sigset_t old_set = current->blocked;
 
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (nset) {
+               compat_sigset_t new32;
+               sigset_t new_set;
+               int error;
+               if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
+                       return -EFAULT;
+
+               sigset_from_compat(&new_set, &new32);
+               sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+               error = sigprocmask(how, &new_set, NULL);
+               if (error)
+                       return error;
+       }
+       if (oset) {
+               compat_sigset_t old32;
+               sigset_to_compat(&old32, &old_set);
+               if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
+                       return -EFAULT;
+       }
+       return 0;
+#else
+       return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
+                                 (sigset_t __user *)oset, sigsetsize);
+#endif
+}
+#endif
+
+static int do_sigpending(void *set, unsigned long sigsetsize)
+{
        if (sigsetsize > sizeof(sigset_t))
-               goto out;
+               return -EINVAL;
 
        spin_lock_irq(&current->sighand->siglock);
-       sigorsets(&pending, &current->pending.signal,
+       sigorsets(set, &current->pending.signal,
                  &current->signal->shared_pending.signal);
        spin_unlock_irq(&current->sighand->siglock);
 
        /* Outside the lock because only this thread touches it.  */
-       sigandsets(&pending, &current->blocked, &pending);
-
-       error = -EFAULT;
-       if (!copy_to_user(set, &pending, sigsetsize))
-               error = 0;
-
-out:
-       return error;
+       sigandsets(set, &current->blocked, set);
+       return 0;
 }
 
 /**
  *  sys_rt_sigpending - examine pending signals that have been raised
  *                     while blocked
- *  @set: stores pending signals
+ *  @uset: stores pending signals
  *  @sigsetsize: size of sigset_t type or larger
  */
-SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
+SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
+{
+       sigset_t set;
+       int err = do_sigpending(&set, sigsetsize);
+       if (!err && copy_to_user(uset, &set, sigsetsize))
+               err = -EFAULT;
+       return err;
+}
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
+               compat_size_t, sigsetsize)
 {
-       return do_sigpending(set, sigsetsize);
+#ifdef __BIG_ENDIAN
+       sigset_t set;
+       int err = do_sigpending(&set, sigsetsize);
+       if (!err) {
+               compat_sigset_t set32;
+               sigset_to_compat(&set32, &set);
+               /* we can get here only if sigsetsize <= sizeof(set) */
+               if (copy_to_user(uset, &set32, sigsetsize))
+                       err = -EFAULT;
+       }
+       return err;
+#else
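+       /* on little-endian bitmaps don't care about granularity */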
+       return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
+#endif
 }
+#endif
 
 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
 
@@ -2505,6 +2785,13 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_ptr, &to->si_ptr);
                break;
+#ifdef __ARCH_SIGSYS
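+       /* SIGSYS, e.g. from a seccomp filter: report the offending syscall */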
+       case __SI_SYS:
+               err |= __put_user(from->si_call_addr, &to->si_call_addr);
+               err |= __put_user(from->si_syscall, &to->si_syscall);
+               err |= __put_user(from->si_arch, &to->si_arch);
+               break;
+#endif
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
@@ -2627,7 +2914,7 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
        info.si_errno = 0;
        info.si_code = SI_USER;
        info.si_pid = task_tgid_vnr(current);
-       info.si_uid = current_uid();
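+       /* report the uid as seen from the sender's own user namespace */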
+       info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
 
        return kill_something_info(sig, &info, pid);
 }
@@ -2664,13 +2951,13 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
 
 static int do_tkill(pid_t tgid, pid_t pid, int sig)
 {
-       struct siginfo info;
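+       /* zero-initialized so uninitialized padding can't leak to userspace */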
+       struct siginfo info = {};
 
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
        info.si_pid = task_tgid_vnr(current);
-       info.si_uid = current_uid();
+       info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
 
        return do_send_specific(tgid, pid, sig, &info);
 }
@@ -2710,6 +2997,23 @@ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
        return do_tkill(0, pid, sig);
 }
 
+static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
+{
+       /* Not even root can pretend to send signals from the kernel.
+        * Nor can they impersonate a kill()/tgkill(), which adds source info.
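+        * A task is, however, allowed to queue arbitrary siginfo to itself.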
+        */
+       if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+           (task_pid_vnr(current) != pid)) {
+               /* We used to allow any < 0 si_code */
+               WARN_ON_ONCE(info->si_code < 0);
+               return -EPERM;
+       }
+       info->si_signo = sig;
+
+       /* POSIX.1b doesn't mention process groups.  */
+       return kill_proc_info(sig, info, pid);
+}
+
 /**
  *  sys_rt_sigqueueinfo - send signal information to a process
  *  @pid: the PID of the process
@@ -2720,25 +3024,26 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
                siginfo_t __user *, uinfo)
 {
        siginfo_t info;
-
        if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
                return -EFAULT;
+       return do_rt_sigqueueinfo(pid, sig, &info);
+}
 
-       /* Not even root can pretend to send signals from the kernel.
-        * Nor can they impersonate a kill()/tgkill(), which adds source info.
-        */
-       if (info.si_code >= 0 || info.si_code == SI_TKILL) {
-               /* We used to allow any < 0 si_code */
-               WARN_ON_ONCE(info.si_code < 0);
-               return -EPERM;
-       }
-       info.si_signo = sig;
-
-       /* POSIX.1b doesn't mention process groups.  */
-       return kill_proc_info(sig, &info, pid);
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
+                       compat_pid_t, pid,
+                       int, sig,
+                       struct compat_siginfo __user *, uinfo)
+{
+       siginfo_t info;
+       int ret = copy_siginfo_from_user32(&info, uinfo);
+       if (unlikely(ret))
+               return ret;
+       return do_rt_sigqueueinfo(pid, sig, &info);
 }
+#endif
 
-long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
+static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
 {
        /* This is only valid for single tasks */
        if (pid <= 0 || tgid <= 0)
@@ -2747,7 +3052,8 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
        /* Not even root can pretend to send signals from the kernel.
         * Nor can they impersonate a kill()/tgkill(), which adds source info.
         */
-       if (info->si_code >= 0 || info->si_code == SI_TKILL) {
+       if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+           (task_pid_vnr(current) != pid)) {
                /* We used to allow any < 0 si_code */
                WARN_ON_ONCE(info->si_code < 0);
                return -EPERM;
@@ -2768,6 +3074,21 @@ SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
        return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
+                       compat_pid_t, tgid,
+                       compat_pid_t, pid,
+                       int, sig,
+                       struct compat_siginfo __user *, uinfo)
+{
+       siginfo_t info;
+
+       if (copy_siginfo_from_user32(&info, uinfo))
+               return -EFAULT;
+       return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
+}
+#endif
+
 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
        struct task_struct *t = current;
@@ -2813,7 +3134,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
        return 0;
 }
 
-int 
+static int
 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
 {
        stack_t oss;
@@ -2878,6 +3199,76 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 out:
        return error;
 }
+
+SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
+{
+       return do_sigaltstack(uss, uoss, current_user_stack_pointer());
+}
+
+int restore_altstack(const stack_t __user *uss)
+{
+       int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
+       /* squash all but EFAULT for now */
+       return err == -EFAULT ? err : 0;
+}
+
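+/*
+ * Write the current altstack state into @uss; callers are expected to have
+ * access_ok()-checked the enclosing signal frame, hence the raw __put_user()s.
+ */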
+int __save_altstack(stack_t __user *uss, unsigned long sp)
+{
+       struct task_struct *t = current;
+       return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
+               __put_user(sas_ss_flags(sp), &uss->ss_flags) |
+               __put_user(t->sas_ss_size, &uss->ss_size);
+}
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(sigaltstack,
+                       const compat_stack_t __user *, uss_ptr,
+                       compat_stack_t __user *, uoss_ptr)
+{
+       stack_t uss, uoss;
+       int ret;
+       mm_segment_t seg;
+
+       if (uss_ptr) {
+               compat_stack_t uss32;
+
+               memset(&uss, 0, sizeof(stack_t));
+               if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
+                       return -EFAULT;
+               uss.ss_sp = compat_ptr(uss32.ss_sp);
+               uss.ss_flags = uss32.ss_flags;
+               uss.ss_size = uss32.ss_size;
+       }
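+       /*
+        * Temporarily widen the address limit so do_sigaltstack()'s
+        * user-copy helpers accept the kernel-space stack_t built above.
+        */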
+       seg = get_fs();
+       set_fs(KERNEL_DS);
+       ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
+                            (stack_t __force __user *) &uoss,
+                            compat_user_stack_pointer());
+       set_fs(seg);
+       if (ret >= 0 && uoss_ptr)  {
+               if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
+                   __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
+                   __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
+                   __put_user(uoss.ss_size, &uoss_ptr->ss_size))
+                       ret = -EFAULT;
+       }
+       return ret;
+}
+
+int compat_restore_altstack(const compat_stack_t __user *uss)
+{
+       int err = compat_sys_sigaltstack(uss, NULL);
+       /* squash all but -EFAULT for now */
+       return err == -EFAULT ? err : 0;
+}
+
+int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
+{
+       struct task_struct *t = current;
+       return  __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
+               __put_user(sas_ss_flags(sp), &uss->ss_flags) |
+               __put_user(t->sas_ss_size, &uss->ss_size);
+}
+#endif
 
 #ifdef __ARCH_WANT_SYS_SIGPENDING
 
@@ -2887,7 +3278,7 @@ out:
  */
 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
 {
-       return do_sigpending(set, sizeof(*set));
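+       /* old_sigset_t matches sigset_t's first word, so a short copy suffices */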
+       return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
 }
 
 #endif
@@ -2914,7 +3305,6 @@ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
        if (nset) {
                if (copy_from_user(&new_set, nset, sizeof(*nset)))
                        return -EFAULT;
-               new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
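+               /* SIGKILL/SIGSTOP stripping now happens in set_current_blocked() */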
 
                new_blocked = current->blocked;
 
@@ -2944,7 +3334,7 @@ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
 }
 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
 
-#ifdef __ARCH_WANT_SYS_RT_SIGACTION
+#ifndef CONFIG_ODD_RT_SIGACTION
 /**
  *  sys_rt_sigaction - alter an action taken by a process
  *  @sig: signal to be sent
@@ -2978,7 +3368,132 @@ SYSCALL_DEFINE4(rt_sigaction, int, sig,
 out:
        return ret;
 }
-#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
+               const struct compat_sigaction __user *, act,
+               struct compat_sigaction __user *, oact,
+               compat_size_t, sigsetsize)
+{
+       struct k_sigaction new_ka, old_ka;
+       compat_sigset_t mask;
+#ifdef __ARCH_HAS_SA_RESTORER
+       compat_uptr_t restorer;
+#endif
+       int ret;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(compat_sigset_t))
+               return -EINVAL;
+
+       if (act) {
+               compat_uptr_t handler;
+               ret = get_user(handler, &act->sa_handler);
+               new_ka.sa.sa_handler = compat_ptr(handler);
+#ifdef __ARCH_HAS_SA_RESTORER
+               ret |= get_user(restorer, &act->sa_restorer);
+               new_ka.sa.sa_restorer = compat_ptr(restorer);
+#endif
+               ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
+               ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+               if (ret)
+                       return -EFAULT;
+               sigset_from_compat(&new_ka.sa.sa_mask, &mask);
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+       if (!ret && oact) {
+               sigset_to_compat(&mask, &old_ka.sa.sa_mask);
+               ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
+                              &oact->sa_handler);
+               ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
+               ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+#ifdef __ARCH_HAS_SA_RESTORER
+               ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
+                               &oact->sa_restorer);
+#endif
+       }
+       return ret;
+}
+#endif
+#endif /* !CONFIG_ODD_RT_SIGACTION */
+
+#ifdef CONFIG_OLD_SIGACTION
+SYSCALL_DEFINE3(sigaction, int, sig,
+               const struct old_sigaction __user *, act,
+               struct old_sigaction __user *, oact)
+{
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+
+       if (act) {
+               old_sigset_t mask;
+               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
+                       return -EFAULT;
+#ifdef __ARCH_HAS_KA_RESTORER
+               new_ka.ka_restorer = NULL;
+#endif
+               siginitset(&new_ka.sa.sa_mask, mask);
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+       if (!ret && oact) {
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+                       return -EFAULT;
+       }
+
+       return ret;
+}
+#endif
+#ifdef CONFIG_COMPAT_OLD_SIGACTION
+COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
+               const struct compat_old_sigaction __user *, act,
+               struct compat_old_sigaction __user *, oact)
+{
+       struct k_sigaction new_ka, old_ka;
+       int ret;
+       compat_old_sigset_t mask;
+       compat_uptr_t handler, restorer;
+
+       if (act) {
+               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+                   __get_user(handler, &act->sa_handler) ||
+                   __get_user(restorer, &act->sa_restorer) ||
+                   __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                   __get_user(mask, &act->sa_mask))
+                       return -EFAULT;
+
+#ifdef __ARCH_HAS_KA_RESTORER
+               new_ka.ka_restorer = NULL;
+#endif
+               new_ka.sa.sa_handler = compat_ptr(handler);
+               new_ka.sa.sa_restorer = compat_ptr(restorer);
+               siginitset(&new_ka.sa.sa_mask, mask);
+       }
+
+       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+       if (!ret && oact) {
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+                   __put_user(ptr_to_compat(old_ka.sa.sa_handler),
+                              &oact->sa_handler) ||
+                   __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
+                              &oact->sa_restorer) ||
+                   __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                   __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+                       return -EFAULT;
+       }
+       return ret;
+}
+#endif
 
 #ifdef __ARCH_WANT_SYS_SGETMASK
 
@@ -2993,15 +3508,11 @@ SYSCALL_DEFINE0(sgetmask)
 
 SYSCALL_DEFINE1(ssetmask, int, newmask)
 {
-       int old;
-
-       spin_lock_irq(&current->sighand->siglock);
-       old = current->blocked.sig[0];
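+       /* no locking needed: only this thread ever writes ->blocked */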
+       int old = current->blocked.sig[0];
+       sigset_t newset;
 
-       siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
-                                                 sigmask(SIGSTOP)));
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
+       siginitset(&newset, newmask);
+       set_current_blocked(&newset);
 
        return old;
 }
@@ -3039,7 +3550,17 @@ SYSCALL_DEFINE0(pause)
 
 #endif
 
-#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
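+/*
+ * Atomically replace the blocked mask with @set and sleep until a signal
+ * arrives; set_restore_sigmask() arranges for the saved mask to be put
+ * back once the signal has been handled.
+ */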
+int sigsuspend(sigset_t *set)
+{
+       current->saved_sigmask = current->blocked;
+       set_current_blocked(set);
+
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_restore_sigmask();
+       return -ERESTARTNOHAND;
+}
+
 /**
  *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
  *     until a signal is received
@@ -3056,20 +3577,47 @@ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
 
        if (copy_from_user(&newset, unewset, sizeof(newset)))
                return -EFAULT;
-       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+       return sigsuspend(&newset);
+}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
+{
+#ifdef __BIG_ENDIAN
+       sigset_t newset;
+       compat_sigset_t newset32;
 
-       spin_lock_irq(&current->sighand->siglock);
-       current->saved_sigmask = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
 
-       current->state = TASK_INTERRUPTIBLE;
-       schedule();
-       set_restore_sigmask();
-       return -ERESTARTNOHAND;
+       if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
+               return -EFAULT;
+       sigset_from_compat(&newset, &newset32);
+       return sigsuspend(&newset);
+#else
+       /* on little-endian bitmaps don't care about granularity */
+       return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
+#endif
 }
-#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+#endif
+
+#ifdef CONFIG_OLD_SIGSUSPEND
+SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
+{
+       sigset_t blocked;
+       siginitset(&blocked, mask);
+       return sigsuspend(&blocked);
+}
+#endif
+#ifdef CONFIG_OLD_SIGSUSPEND3
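+/* some ABIs pass the mask as the third of three arguments */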
+SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
+{
+       sigset_t blocked;
+       siginitset(&blocked, mask);
+       return sigsuspend(&blocked);
+}
+#endif
 
 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
 {