[PATCH] kill PF_DEAD flag
After the previous change (->flags & PF_DEAD) <=> (->state == EXIT_DEAD), we don't need PF_DEAD any longer.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 55a101f8f7
parent 29b8849216

4 changed files with 12 additions and 15 deletions
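The equivalence the message relies on can be pictured with a small, self-contained C sketch. Everything below (the struct layout, the EXIT_DEAD value, the helper names) is a simplified stand-in chosen for illustration, not the kernel's definitions:

/* Illustrative sketch only -- simplified stand-ins, not kernel code. */
#include <stdbool.h>

#define PF_DEAD   0x00000008  /* old: a bit in ->flags set at the end of do_exit() */
#define EXIT_DEAD 32          /* value picked for the sketch; the ->state set before the final schedule() */

struct task {
	unsigned long flags;  /* PF_* bits */
	long state;           /* scheduler state; EXIT_DEAD once past the final schedule() */
};

/* Before the patch: "has this task done its final schedule?" asked via ->flags. */
static bool task_is_dead_old(const struct task *t)
{
	return t->flags & PF_DEAD;
}

/* After the patch: the same question answered from ->state alone,
 * which the previous change already made equivalent. */
static bool task_is_dead_new(const struct task *t)
{
	return t->state == EXIT_DEAD;
}

int main(void)
{
	struct task t = { .flags = PF_DEAD, .state = EXIT_DEAD };
	return task_is_dead_old(&t) == task_is_dead_new(&t) ? 0 : 1;
}

With the two tests equivalent, the flag bit and the extra tsk->flags |= PF_DEAD store in do_exit() carry no information, which is exactly what the hunks below remove.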
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1061,7 +1061,6 @@ static inline void put_task_struct(struct task_struct *t)
 					/* Not implemented yet, only for 486*/
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
-#define PF_DEAD		0x00000008	/* Dead */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
 #define PF_DUMPCORE	0x00000200	/* dumped core */
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -953,10 +953,8 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (tsk->splice_pipe)
 		__free_pipe_info(tsk->splice_pipe);
 
-	/* PF_DEAD causes final put_task_struct after we schedule. */
 	preempt_disable();
-	BUG_ON(tsk->flags & PF_DEAD);
-	tsk->flags |= PF_DEAD;
+	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = EXIT_DEAD;
 
 	schedule();
@@ -972,7 +970,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
 {
 	if (comp)
 		complete(comp);
 
 	do_exit(code);
 }
 
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1755,27 +1755,27 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct mm_struct *mm = rq->prev_mm;
-	unsigned long prev_task_flags;
+	long prev_state;
 
 	rq->prev_mm = NULL;
 
 	/*
 	 * A task struct has one reference for the use as "current".
-	 * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and
-	 * calls schedule one last time. The schedule call will never return,
-	 * and the scheduled task must drop that reference.
-	 * The test for EXIT_ZOMBIE must occur while the runqueue locks are
+	 * If a task dies, then it sets EXIT_DEAD in tsk->state and calls
+	 * schedule one last time. The schedule call will never return, and
+	 * the scheduled task must drop that reference.
+	 * The test for EXIT_DEAD must occur while the runqueue locks are
 	 * still held, otherwise prev could be scheduled on another cpu, die
 	 * there before we look at prev->state, and then the reference would
 	 * be dropped twice.
 	 * Manfred Spraul <manfred@colorfullife.com>
 	 */
-	prev_task_flags = prev->flags;
+	prev_state = prev->state;
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
 	if (mm)
 		mmdrop(mm);
-	if (unlikely(prev_task_flags & PF_DEAD)) {
+	if (unlikely(prev_state == EXIT_DEAD)) {
 		/*
 		 * Remove function-return probe instances associated with this
 		 * task and put them back on the free list.
@@ -5153,7 +5153,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 	BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
 
 	/* Cannot have done final schedule yet: would have vanished. */
-	BUG_ON(p->flags & PF_DEAD);
+	BUG_ON(p->state == EXIT_DEAD);
 
 	get_task_struct(p);
 
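The ordering argument in the finish_task_switch() comment above -- sample prev->state while the runqueue lock is still held, otherwise prev could be rescheduled and die on another cpu and the reference would be dropped twice -- can be sketched as a minimal stand-alone model. The types, the helper bodies and the EXIT_DEAD value here are simplified assumptions for the sketch, not the kernel's code:

/* Minimal model of the ordering in finish_task_switch(); not kernel code. */
#include <stdio.h>

#define EXIT_DEAD 32          /* value chosen for the sketch */

struct task { long state; int refcount; };
struct rq   { int locked; };

static void put_task(struct task *t)            /* drop one reference */
{
	if (--t->refcount == 0)
		printf("final reference dropped, task freed\n");
}

static void finish_lock_switch(struct rq *rq)   /* stands in for releasing rq->lock */
{
	rq->locked = 0;
}

static void finish_task_switch_sketch(struct rq *rq, struct task *prev)
{
	/* Read prev->state while the runqueue lock is still held.  After the
	 * lock is dropped, prev may be scheduled on another CPU and die there;
	 * re-reading prev->state at that point could see EXIT_DEAD for a death
	 * this CPU did not switch away from, and the reference would be
	 * dropped twice. */
	long prev_state = prev->state;

	finish_lock_switch(rq);

	if (prev_state == EXIT_DEAD)
		put_task(prev);   /* the dying task's "current" reference */
}

int main(void)
{
	struct rq rq = { .locked = 1 };
	struct task t = { .state = EXIT_DEAD, .refcount = 1 };
	finish_task_switch_sketch(&rq, &t);
	return 0;
}

The patch only swaps the sampled quantity from prev->flags & PF_DEAD to prev->state == EXIT_DEAD; the ordering requirement itself is unchanged.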
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -226,8 +226,8 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
 		releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
 				p->flags & PF_EXITING;
 		if (releasing) {
-			/* PF_DEAD tasks have already released their mm */
-			if (p->flags & PF_DEAD)
+			/* TASK_DEAD tasks have already released their mm */
+			if (p->state == EXIT_DEAD)
 				continue;
 			if (p->flags & PF_EXITING && p == current) {
 				chosen = p;