Isolate some explicit usage of task->tgid
With pid namespaces this field is now dangerous to use explicitly, so hide it behind the helpers. The pid and pgrp fields of task_struct and signal_struct are also to be deprecated. Unfortunately, deprecating them right now would produce tons of warnings, so start by isolating the explicit uses and deprecate the fields later.

Strictly speaking, the p->tgid == pid checks should become has_group_leader_pid(), but Oleg pointed out that for the posix cpu timers the two are equivalent, and thread_group_leader() is preferable there.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Sukadev Bhattiprolu <sukadev@us.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 19b9b9b54e
commit bac0abd617

8 changed files with 20 additions and 14 deletions
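To make the mapping concrete before the diff, here is a minimal user-space sketch (an illustration only, not kernel code: the struct task below and the helper bodies are simplified stand-ins for task_struct and the sched.h helpers) of which open-coded tgid comparison each helper replaces. In the real kernel thread_group_leader() is a distinct test; as the message above notes, it only coincides with the p->tgid == pid / has_group_leader_pid() check in the posix-cpu-timers lookups, which is why the stand-in below deliberately uses the same body for both.

/*
 * Minimal user-space model of the helper mapping described above.
 * NOT kernel code: "struct task" and the helper bodies are simplified
 * stand-ins used only to show which open-coded tgid comparison each
 * helper replaces in this commit.
 */
#include <assert.h>
#include <stdio.h>

struct task {
	int pid;	/* per-thread id */
	int tgid;	/* thread group id (the leader's pid) */
};

/* a->tgid == b->tgid  becomes  same_thread_group(a, b) */
static int same_thread_group(const struct task *p1, const struct task *p2)
{
	return p1->tgid == p2->tgid;
}

/* p->tgid == pid (pid used to look p up)  becomes  thread_group_leader(p);
 * stand-in body only, the real kernel test is different but equivalent here */
static int thread_group_leader(const struct task *p)
{
	return p->pid == p->tgid;
}

/* p->pid == p->tgid  becomes  has_group_leader_pid(p), as in de_thread() */
static int has_group_leader_pid(const struct task *p)
{
	return p->pid == p->tgid;
}

int main(void)
{
	struct task leader = { .pid = 100, .tgid = 100 };
	struct task worker = { .pid = 101, .tgid = 100 };
	struct task other  = { .pid = 200, .tgid = 200 };

	assert(same_thread_group(&leader, &worker));
	assert(!same_thread_group(&worker, &other));
	assert(thread_group_leader(&leader) && !thread_group_leader(&worker));
	assert(has_group_leader_pid(&leader));

	printf("helper mapping behaves like the old tgid comparisons\n");
	return 0;
}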
fs/exec.c
@@ -841,8 +841,8 @@ static int de_thread(struct task_struct *tsk)
 		 */
 		tsk->start_time = leader->start_time;
 
-		BUG_ON(leader->tgid != tsk->tgid);
-		BUG_ON(tsk->pid == tsk->tgid);
+		BUG_ON(!same_thread_group(leader, tsk));
+		BUG_ON(has_group_leader_pid(tsk));
 		/*
 		 * An exec() starts a new thread group with the
 		 * TGID of the previous thread group. Rehash the
fs/proc/base.c
@@ -2553,7 +2553,7 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
 	rcu_read_unlock();
 	if (!task)
 		goto out;
-	if (leader->tgid != task->tgid)
+	if (!same_thread_group(leader, task))
 		goto out_drop_task;
 
 	result = proc_task_instantiate(dir, dentry, task, NULL);
include/linux/sched.h
@@ -1725,6 +1725,12 @@ static inline int has_group_leader_pid(struct task_struct *p)
 	return p->pid == p->tgid;
 }
 
+static inline
+int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+{
+	return p1->tgid == p2->tgid;
+}
+
 static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
kernel/posix-cpu-timers.c
@@ -21,8 +21,8 @@ static int check_clock(const clockid_t which_clock)
 
 	read_lock(&tasklist_lock);
 	p = find_task_by_pid(pid);
-	if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
-		   p->tgid != current->tgid : p->tgid != pid)) {
+	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
+		   same_thread_group(p, current) : thread_group_leader(p))) {
 		error = -EINVAL;
 	}
 	read_unlock(&tasklist_lock);
@@ -308,13 +308,13 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 		p = find_task_by_pid(pid);
 		if (p) {
 			if (CPUCLOCK_PERTHREAD(which_clock)) {
-				if (p->tgid == current->tgid) {
+				if (same_thread_group(p, current)) {
 					error = cpu_clock_sample(which_clock,
 								 p, &rtn);
 				}
 			} else {
 				read_lock(&tasklist_lock);
-				if (p->tgid == pid && p->signal) {
+				if (thread_group_leader(p) && p->signal) {
 					error =
 					    cpu_clock_sample_group(which_clock,
 								   p, &rtn);
@@ -355,7 +355,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 			p = current;
 		} else {
 			p = find_task_by_pid(pid);
-			if (p && p->tgid != current->tgid)
+			if (p && !same_thread_group(p, current))
 				p = NULL;
 		}
 	} else {
@@ -363,7 +363,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 			p = current->group_leader;
 		} else {
 			p = find_task_by_pid(pid);
-			if (p && p->tgid != pid)
+			if (p && !thread_group_leader(p))
 				p = NULL;
 		}
 	}
kernel/posix-timers.c
@@ -404,7 +404,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
 
 	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
 		(!(rtn = find_task_by_pid(event->sigev_notify_thread_id)) ||
-		 rtn->tgid != current->tgid ||
+		 !same_thread_group(rtn, current) ||
 		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
 		return NULL;
 
@@ -608,7 +608,7 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
 		spin_lock(&timr->it_lock);
 
 		if ((timr->it_id != timer_id) || !(timr->it_process) ||
-				timr->it_process->tgid != current->tgid) {
+				!same_thread_group(timr->it_process, current)) {
 			spin_unlock(&timr->it_lock);
 			spin_unlock_irqrestore(&idr_lock, *flags);
 			timr = NULL;
kernel/ptrace.c
@@ -169,7 +169,7 @@ int ptrace_attach(struct task_struct *task)
 	retval = -EPERM;
 	if (task->pid <= 1)
 		goto out;
-	if (task->tgid == current->tgid)
+	if (same_thread_group(task, current))
 		goto out;
 
 repeat:
kernel/signal.c
@@ -1150,7 +1150,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
 
 		read_lock(&tasklist_lock);
 		for_each_process(p) {
-			if (p->pid > 1 && p->tgid != current->tgid) {
+			if (p->pid > 1 && !same_thread_group(p, current)) {
 				int err = group_send_sig_info(sig, info, p);
 				++count;
 				if (err != -EPERM)
mm/oom_kill.c
@@ -326,7 +326,7 @@ static int oom_kill_task(struct task_struct *p)
 	 * to memory reserves though, otherwise we might deplete all memory.
 	 */
 	do_each_thread(g, q) {
-		if (q->mm == mm && q->tgid != p->tgid)
+		if (q->mm == mm && !same_thread_group(q, p))
 			force_sig(SIGKILL, q);
 	} while_each_thread(g, q);
 