sched: Use rcu in sched_get/set_affinity()

tasklist_lock is held read-locked to protect the
find_task_by_vpid() call and to prevent the task from going
away. sched_setaffinity() acquires a task_struct reference and
drops the tasklist lock right away. Access to the cpus_allowed
mask is protected by rq->lock.

rcu_read_lock() provides the same protection here.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20091209100706.789059966@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
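
For context, here is a minimal sketch of the lookup pattern this change
relies on, written as a hypothetical helper (get_task_for_pid() is not
part of the patch): the RCU read section keeps the task_struct from
being freed during the pid lookup, and the reference count pins the
task once it is found, so the tasklist_lock read side is not needed.

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical helper, not from the patch: RCU-protected task lookup. */
static struct task_struct *get_task_for_pid(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();		/* task_struct cannot be freed here */
	p = find_task_by_vpid(pid);	/* RCU-safe pid -> task lookup */
	if (p)
		get_task_struct(p);	/* pin p beyond the RCU section */
	rcu_read_unlock();

	return p;	/* caller releases with put_task_struct(p) */
}

The RCU section can be dropped immediately after the lookup because the
actual cpus_allowed update is still serialized by rq->lock, as the
changelog above notes.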
@@ -6516,22 +6516,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	p = find_process_by_pid(pid);
 	if (!p) {
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		put_online_cpus();
 		return -ESRCH;
 	}
 
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held. We will bump the task_struct's
-	 * usage count and then drop tasklist_lock.
-	 */
+	/* Prevent p going away */
 	get_task_struct(p);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
 		retval = -ENOMEM;
@@ -6617,7 +6613,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	int retval;
 
 	get_online_cpus();
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 
 	retval = -ESRCH;
 	p = find_process_by_pid(pid);
@@ -6633,7 +6629,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 	task_rq_unlock(rq, &flags);
 
 out_unlock:
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	put_online_cpus();
 
 	return retval;