signals: make task_struct->signal immutable/refcountable
author     Oleg Nesterov <oleg@redhat.com>
           Wed, 26 May 2010 21:43:16 +0000 (14:43 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 27 May 2010 16:12:46 +0000 (09:12 -0700)
We have a lot of problems with accessing task_struct->signal: it can
"disappear" at any moment.  Even current can't use its ->signal safely
after exit_notify().  ->siglock helps, but it is not convenient, not
always possible, and sometimes it makes sense to use task->signal even
after the task has already died.

This patch adds a reference counter, sigcnt, to signal_struct.  The
reference is owned by task_struct and is dropped in
__put_task_struct().  Perhaps it makes sense to export
get/put_signal_struct() later, but currently I don't see an immediate
reason to.
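
For illustration only: if the get/put pair were ever exported, the
"get" side would presumably be nothing more than an atomic_inc of
->sigcnt.  The sketch below is hypothetical; get_signal_struct() is
not added by this patch (put_signal_struct() is, see the kernel/fork.c
hunk):

	/* hypothetical helper, not part of this patch */
	static inline void get_signal_struct(struct signal_struct *sig)
	{
		atomic_inc(&sig->sigcnt);
	}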

Rename __cleanup_signal() to free_signal_struct() and unexport it.  With
the previous changes it does nothing except kmem_cache_free().

Change __exit_signal() to not clear/free ->signal; it will now be freed
when the last reference to any thread in the thread group goes away.
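
The effect is that holding a reference to a task_struct now pins its
->signal: the pointer is no longer cleared at exit time, and the struct
is freed only once the last task in the group is released via
__put_task_struct().  A rough usage sketch (do_something() is a
placeholder; locking and error handling omitted):

	get_task_struct(tsk);		/* pin the task */
	...
	/* ->signal stays valid here, even if tsk has already exited */
	do_something(tsk->signal);
	...
	put_task_struct(tsk);		/* may drop the final sigcnt reference */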

Note:
- when the last thread exits, signal->tty can point to nowhere; see
  the next patch.

- with or without this patch, signal_struct->count should go away,
  or at least become an "int nr_threads" for fs/proc. This will be
  addressed later.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Alan Cox <alan@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/sched.h
kernel/exit.c
kernel/fork.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a95a2455cebe8f9041347dc090595bba64e69b09..32e309df408ca355ce0a912a4ee2231dfa0ae045 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -527,6 +527,7 @@ struct thread_group_cputimer {
  * the locking of signal_struct.
  */
 struct signal_struct {
+       atomic_t                sigcnt;
        atomic_t                count;
        atomic_t                live;
 
@@ -2101,7 +2102,6 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void __cleanup_signal(struct signal_struct *);
 extern void __cleanup_sighand(struct sighand_struct *);
 
 extern void exit_itimers(struct signal_struct *);
diff --git a/kernel/exit.c b/kernel/exit.c
index 4a72f1753edb247dd617eee602d5bb2f4a1cde3d..92af5cde9bbe1414077f17421dd0c9f5515f7209 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -134,8 +134,6 @@ static void __exit_signal(struct task_struct *tsk)
         * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
         */
        flush_sigqueue(&tsk->pending);
-
-       tsk->signal = NULL;
        tsk->sighand = NULL;
        spin_unlock(&sighand->siglock);
 
@@ -150,7 +148,6 @@ static void __exit_signal(struct task_struct *tsk)
                 */
                task_rq_unlock_wait(tsk);
                tty_kref_put(sig->tty);
-               __cleanup_signal(sig);
        }
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b7879ef6e7cdb68c1c5aa8ba54077507aa9d5481..e08e3012cd6b4e1cf90e4b025ec77c17612c247c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -165,6 +165,18 @@ void free_task(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(free_task);
 
+static inline void free_signal_struct(struct signal_struct *sig)
+{
+       thread_group_cputime_free(sig);
+       kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void put_signal_struct(struct signal_struct *sig)
+{
+       if (atomic_dec_and_test(&sig->sigcnt))
+               free_signal_struct(sig);
+}
+
 void __put_task_struct(struct task_struct *tsk)
 {
        WARN_ON(!tsk->exit_state);
@@ -173,6 +185,7 @@ void __put_task_struct(struct task_struct *tsk)
 
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
+       put_signal_struct(tsk->signal);
 
        if (!profile_handoff_task(tsk))
                free_task(tsk);
@@ -864,6 +877,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        if (!sig)
                return -ENOMEM;
 
+       atomic_set(&sig->sigcnt, 1);
        atomic_set(&sig->count, 1);
        atomic_set(&sig->live, 1);
        init_waitqueue_head(&sig->wait_chldexit);
@@ -889,12 +903,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        return 0;
 }
 
-void __cleanup_signal(struct signal_struct *sig)
-{
-       thread_group_cputime_free(sig);
-       kmem_cache_free(signal_cachep, sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
        unsigned long new_flags = p->flags;
@@ -1248,6 +1256,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        }
 
        if (clone_flags & CLONE_THREAD) {
+               atomic_inc(&current->signal->sigcnt);
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
                p->group_leader = current->group_leader;
@@ -1294,7 +1303,7 @@ bad_fork_cleanup_mm:
                mmput(p->mm);
 bad_fork_cleanup_signal:
        if (!(clone_flags & CLONE_THREAD))
-               __cleanup_signal(p->signal);
+               free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
        __cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs: