author		Roland McGrath <roland@redhat.com>	2005-10-21 15:03:29 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-21 15:38:08 -0700
commit		25f407f0b668f5e4ebd5d13e1fb4306ba6427ead (patch)
tree		10d8661419da9e0d6be6d22ef319582d052c4a26
parent		9465bee863bc4c6cf1566c12d6f92a8133e3da5c (diff)
download	linux-25f407f0b668f5e4ebd5d13e1fb4306ba6427ead.tar.gz
[PATCH] Call exit_itimers from do_exit, not __exit_signal
When I originally moved exit_itimers into __exit_signal, that was the only place where we could reliably know it was the last thread in the group dying, without races.  Since then we've gotten the signal_struct.live counter, and do_exit can reliably do group-wide cleanup work.

This patch moves the call to do_exit, where it's made without locks.  This avoids the deadlock issues that the old __exit_signal code's comment talks about, and the one that Oleg found recently with process CPU timers.

[ This replaces e03d13e985d48ac4885382c9e3b1510c78bd047f, which is why it was just reverted. ]

Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
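The mechanism the patch relies on is the signal_struct.live counter: every exiting thread decrements it in do_exit, and only the thread that takes it to zero (group_dead) performs the group-wide teardown, which now includes exit_itimers.  Below is a minimal userspace sketch of that last-thread-out pattern, not kernel code; the names group_live, group_cleanup and worker are hypothetical stand-ins for signal->live, the group_dead cleanup block and an exiting thread.

/*
 * Minimal userspace sketch of the last-thread-out pattern, not kernel
 * code.  Each thread decrements a shared "live" counter on exit; only
 * the thread that takes it to zero runs the group-wide cleanup, the
 * same way do_exit() tests atomic_dec_and_test(&tsk->signal->live)
 * before calling del_timer_sync(), exit_itimers() and acct_process().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int group_live = NTHREADS;	/* plays the role of signal->live */

static void group_cleanup(void)
{
	/* Runs exactly once, after every thread has reached its exit path. */
	printf("last thread out: doing group-wide cleanup\n");
}

static void *worker(void *arg)
{
	(void)arg;
	/* atomic_fetch_sub() returns the old value, so seeing 1 means this
	 * thread took the counter to zero, i.e. the group_dead case. */
	if (atomic_fetch_sub(&group_live, 1) == 1)
		group_cleanup();
	return NULL;
}

int main(void)
{
	pthread_t tids[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&tids[i], NULL, worker, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tids[i], NULL);
	return 0;
}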
-rw-r--r--	kernel/exit.c		1
-rw-r--r--	kernel/posix-timers.c	2
-rw-r--r--	kernel/signal.c		14
3 files changed, 3 insertions, 14 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 43077732619bee..3b25b182d2be35 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -843,6 +843,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
 		del_timer_sync(&tsk->signal->real_timer);
+		exit_itimers(tsk->signal);
 		acct_process(code);
 	}
 	exit_mm(tsk);
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index b7b532acd9fc41..dda3cda73c77c2 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -1157,7 +1157,7 @@ retry_delete:
 }
 
 /*
- * This is called by __exit_signal, only when there are no more
+ * This is called by do_exit or de_thread, only when there are no more
  * references to the shared signal_struct.
  */
 void exit_itimers(struct signal_struct *sig)
diff --git a/kernel/signal.c b/kernel/signal.c
index 50c992643771e0..f2b96b08fb4472 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -397,20 +397,8 @@ void __exit_signal(struct task_struct *tsk)
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
 		/*
-		 * We are cleaning up the signal_struct here. We delayed
-		 * calling exit_itimers until after flush_sigqueue, just in
-		 * case our thread-local pending queue contained a queued
-		 * timer signal that would have been cleared in
-		 * exit_itimers. When that called sigqueue_free, it would
-		 * attempt to re-take the tasklist_lock and deadlock. This
-		 * can never happen if we ensure that all queues the
-		 * timer's signal might be queued on have been flushed
-		 * first. The shared_pending queue, and our own pending
-		 * queue are the only queues the timer could be on, since
-		 * there are no other threads left in the group and timer
-		 * signals are constrained to threads inside the group.
+		 * We are cleaning up the signal_struct here.
 		 */
-		exit_itimers(sig);
 		exit_thread_group_keys(sig);
 		kmem_cache_free(signal_cachep, sig);
 	}