mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 11:46:19 +00:00
exec: simplify the new ->sighand allocation
de_thread() pre-allocates newsighand to make sure that exec() can't fail after killing all sub-threads. Imho, this buys nothing, but complicates the code:

- this is (mostly) needed to handle CLONE_SIGHAND without CLONE_THREAD tasks; this is a very unlikely (if ever used) case
- unless we already have some serious problems, a GFP_KERNEL allocation should not fail
- ENOMEM can still happen after de_thread(); ->sighand is not the last object we have to allocate

Change the code to allocate the new ->sighand on demand. Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru> Cc: Roland McGrath <roland@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
0840a90d94
commit
b2c903b879
1 changed file with 9 additions and 15 deletions
24
fs/exec.c
24
fs/exec.c
|
@ -747,7 +747,7 @@ static int exec_mmap(struct mm_struct *mm)
|
|||
static int de_thread(struct task_struct *tsk)
|
||||
{
|
||||
struct signal_struct *sig = tsk->signal;
|
||||
struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
|
||||
struct sighand_struct *oldsighand = tsk->sighand;
|
||||
spinlock_t *lock = &oldsighand->siglock;
|
||||
struct task_struct *leader = NULL;
|
||||
int count;
|
||||
|
@ -761,10 +761,6 @@ static int de_thread(struct task_struct *tsk)
|
|||
return 0;
|
||||
}
|
||||
|
||||
newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
|
||||
if (!newsighand)
|
||||
return -ENOMEM;
|
||||
|
||||
if (thread_group_empty(tsk))
|
||||
goto no_thread_group;
|
||||
|
||||
|
@ -781,7 +777,6 @@ static int de_thread(struct task_struct *tsk)
|
|||
*/
|
||||
spin_unlock_irq(lock);
|
||||
read_unlock(&tasklist_lock);
|
||||
kmem_cache_free(sighand_cachep, newsighand);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
|
@ -899,17 +894,16 @@ no_thread_group:
|
|||
if (leader)
|
||||
release_task(leader);
|
||||
|
||||
if (atomic_read(&oldsighand->count) == 1) {
|
||||
if (atomic_read(&oldsighand->count) != 1) {
|
||||
struct sighand_struct *newsighand;
|
||||
/*
|
||||
* Now that we nuked the rest of the thread group,
|
||||
* it turns out we are not sharing sighand any more either.
|
||||
* So we can just keep it.
|
||||
*/
|
||||
kmem_cache_free(sighand_cachep, newsighand);
|
||||
} else {
|
||||
/*
|
||||
* Move our state over to newsighand and switch it in.
|
||||
* This ->sighand is shared with the CLONE_SIGHAND
|
||||
* but not CLONE_THREAD task, switch to the new one.
|
||||
*/
|
||||
newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
|
||||
if (!newsighand)
|
||||
return -ENOMEM;
|
||||
|
||||
atomic_set(&newsighand->count, 1);
|
||||
memcpy(newsighand->action, oldsighand->action,
|
||||
sizeof(newsighand->action));
|
||||
|
|
Loading…
Reference in a new issue