mirror of
https://github.com/adulau/aha.git
synced 2024-12-27 19:26:25 +00:00
[PATCH] coredump: shutdown current process first
This patch optimizes zap_threads() for the case when there are no ->mm users except the current's thread group. In that case we can avoid the 'for_each_process()' loop. It also adds a useful invariant: SIGNAL_GROUP_EXIT (if checked under ->siglock) always implies that all threads (except maybe current) have a pending SIGKILL. Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru> Cc: Roland McGrath <roland@redhat.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
dcf560c593
commit
5debfa6da5
1 changed file with 17 additions and 12 deletions
29
fs/exec.c
29
fs/exec.c
|
@ -1371,13 +1371,7 @@ static void format_corename(char *corename, const char *pattern, long signr)
|
|||
static void zap_process(struct task_struct *start)
|
||||
{
|
||||
struct task_struct *t;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* start->sighand can't disappear, but may be
|
||||
* changed by de_thread()
|
||||
*/
|
||||
lock_task_sighand(start, &flags);
|
||||
start->signal->flags = SIGNAL_GROUP_EXIT;
|
||||
start->signal->group_stop_count = 0;
|
||||
|
||||
|
@ -1389,40 +1383,51 @@ static void zap_process(struct task_struct *start)
|
|||
signal_wake_up(t, 1);
|
||||
}
|
||||
} while ((t = next_thread(t)) != start);
|
||||
|
||||
unlock_task_sighand(start, &flags);
|
||||
}
|
||||
|
||||
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
||||
int exit_code)
|
||||
{
|
||||
struct task_struct *g, *p;
|
||||
unsigned long flags;
|
||||
int err = -EAGAIN;
|
||||
|
||||
spin_lock_irq(&tsk->sighand->siglock);
|
||||
if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
|
||||
tsk->signal->flags = SIGNAL_GROUP_EXIT;
|
||||
tsk->signal->group_exit_code = exit_code;
|
||||
tsk->signal->group_stop_count = 0;
|
||||
zap_process(tsk);
|
||||
err = 0;
|
||||
}
|
||||
spin_unlock_irq(&tsk->sighand->siglock);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
|
||||
goto done;
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_process(g) {
|
||||
if (g == tsk->group_leader)
|
||||
continue;
|
||||
|
||||
p = g;
|
||||
do {
|
||||
if (p->mm) {
|
||||
if (p->mm == mm)
|
||||
if (p->mm == mm) {
|
||||
/*
|
||||
* p->sighand can't disappear, but
|
||||
* may be changed by de_thread()
|
||||
*/
|
||||
lock_task_sighand(p, &flags);
|
||||
zap_process(p);
|
||||
unlock_task_sighand(p, &flags);
|
||||
}
|
||||
break;
|
||||
}
|
||||
} while ((p = next_thread(p)) != g);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
done:
|
||||
return mm->core_waiters;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue