[PATCH] coredump: kill ptrace related stuff
With this patch zap_process() sets SIGNAL_GROUP_EXIT while sending SIGKILL
to the thread group. This means that a TASK_TRACED task:

  1. will be awakened by signal_wake_up(1);
  2. can't sleep again via ptrace_notify();
  3. can't go to do_signal_stop() after return from ptrace_stop() in
     get_signal_to_deliver().

So we can remove all ptrace-related stuff from the coredump path.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 281de339ce
commit d5f70c00ad
4 changed files with 37 additions and 32 deletions
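For orientation before the hunks: the heart of the change is that zap_process() now marks the whole thread group with SIGNAL_GROUP_EXIT and queues SIGKILL for every thread in a single pass under ->siglock, which is what lets the ptrace special cases below go away. Here is a condensed sketch of the post-patch function, reassembled from the fs/exec.c hunks below; the loop-body guard (t != current && t->mm) is not visible in the hunks shown here and is assumed.

/*
 * Sketch only: zap_process() as it reads after this patch (fs/exec.c),
 * reassembled from the hunks below. The loop guard is assumed, not
 * taken verbatim from the diff.
 */
static void zap_process(struct task_struct *start)
{
	struct task_struct *t;
	unsigned long flags;

	spin_lock_irqsave(&start->sighand->siglock, flags);
	/* group exit: no more ptrace_notify() sleeps, no do_signal_stop() */
	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {	/* assumed guard */
			t->mm->core_waiters++;
			sigaddset(&t->pending.signal, SIGKILL);
			/* resume == 1: wakes the thread even in TASK_TRACED */
			signal_wake_up(t, 1);
		}
	} while ((t = next_thread(t)) != start);

	spin_unlock_irqrestore(&start->sighand->siglock, flags);
}

Because SIGNAL_GROUP_EXIT is already set when the SIGKILLs go out, a thread sitting in TASK_TRACED is woken and cannot re-enter a ptrace stop, so zap_threads() no longer needs the __ptrace_detach() fallback that the last fs/exec.c hunk removes.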
fs/exec.c | 30

@@ -1368,12 +1368,14 @@ static void format_corename(char *corename, const char *pattern, long signr)
 		*out_ptr = 0;
 }
 
-static void zap_process(struct task_struct *start, int *ptraced)
+static void zap_process(struct task_struct *start)
 {
 	struct task_struct *t;
 	unsigned long flags;
 
 	spin_lock_irqsave(&start->sighand->siglock, flags);
+	start->signal->flags = SIGNAL_GROUP_EXIT;
 	start->signal->group_stop_count = 0;
 
 	t = start;
 	do {
@@ -1381,22 +1383,17 @@ static void zap_process(struct task_struct *start, int *ptraced)
 			t->mm->core_waiters++;
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
-
-			if (unlikely(t->ptrace) &&
-			    unlikely(t->parent->mm == t->mm))
-				*ptraced = 1;
 		}
 	} while ((t = next_thread(t)) != start);
 
 	spin_unlock_irqrestore(&start->sighand->siglock, flags);
 }
 
-static void zap_threads (struct mm_struct *mm)
+static void zap_threads(struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 	struct task_struct *tsk = current;
 	struct completion *vfork_done = tsk->vfork_done;
-	int traced = 0;
 
 	/*
 	 * Make sure nobody is waiting for us to release the VM,
@@ -1413,29 +1410,12 @@ static void zap_threads (struct mm_struct *mm)
 		do {
 			if (p->mm) {
 				if (p->mm == mm)
-					zap_process(p, &traced);
+					zap_process(p);
 				break;
 			}
 		} while ((p = next_thread(p)) != g);
 	}
 	read_unlock(&tasklist_lock);
-
-	if (unlikely(traced)) {
-		/*
-		 * We are zapping a thread and the thread it ptraces.
-		 * If the tracee went into a ptrace stop for exit tracing,
-		 * we could deadlock since the tracer is waiting for this
-		 * coredump to finish. Detach them so they can both die.
-		 */
-		write_lock_irq(&tasklist_lock);
-		do_each_thread(g,p) {
-			if (mm == p->mm && p != tsk &&
-			    p->ptrace && p->parent->mm == mm) {
-				__ptrace_detach(p, 0);
-			}
-		} while_each_thread(g,p);
-		write_unlock_irq(&tasklist_lock);
-	}
 }
 
 static void coredump_wait(struct mm_struct *mm)
include/linux/ptrace.h

@@ -88,7 +88,6 @@ extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __us
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
 extern int ptrace_attach(struct task_struct *tsk);
 extern int ptrace_detach(struct task_struct *, unsigned int);
-extern void __ptrace_detach(struct task_struct *, unsigned int);
 extern void ptrace_disable(struct task_struct *);
 extern int ptrace_check_attach(struct task_struct *task, int kill);
 extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
kernel/ptrace.c

@@ -214,7 +214,7 @@ out:
 	return retval;
 }
 
-void __ptrace_detach(struct task_struct *child, unsigned int data)
+static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
 {
 	child->exit_code = data;
 	/* .. re-parent .. */
@@ -233,6 +233,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 	ptrace_disable(child);
 
 	write_lock_irq(&tasklist_lock);
+	/* protect against de_thread()->release_task() */
 	if (child->ptrace)
 		__ptrace_detach(child, data);
 	write_unlock_irq(&tasklist_lock);
kernel/signal.c

@@ -1531,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 	spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
+static inline int may_ptrace_stop(void)
+{
+	if (!likely(current->ptrace & PT_PTRACED))
+		return 0;
+
+	if (unlikely(current->parent == current->real_parent &&
+		     (current->ptrace & PT_ATTACHED)))
+		return 0;
+
+	if (unlikely(current->signal == current->parent->signal) &&
+	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
+		return 0;
+
+	/*
+	 * Are we in the middle of do_coredump?
+	 * If so and our tracer is also part of the coredump stopping
+	 * is a deadlock situation, and pointless because our tracer
+	 * is dead so don't allow us to stop.
+	 * If SIGKILL was already sent before the caller unlocked
+	 * ->siglock we must see ->core_waiters != 0. Otherwise it
+	 * is safe to enter schedule().
+	 */
+	if (unlikely(current->mm->core_waiters) &&
+	    unlikely(current->mm == current->parent->mm))
+		return 0;
+
+	return 1;
+}
+
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1559,11 +1588,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	spin_unlock_irq(&current->sighand->siglock);
 	try_to_freeze();
 	read_lock(&tasklist_lock);
-	if (likely(current->ptrace & PT_PTRACED) &&
-	    likely(current->parent != current->real_parent ||
-		   !(current->ptrace & PT_ATTACHED)) &&
-	    (likely(current->parent->signal != current->signal) ||
-	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
+	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();