lockdep: re-annotate scheduler runqueues
Instead of using a per-rq lock class, use the regular nesting operations.

However, take extra care with double_lock_balance() as it can release the
already held rq->lock (and therefore change its nesting class).

So what can happen is:

	spin_lock(rq->lock);	// this rq subclass 0

	double_lock_balance(rq, other_rq);
	// release rq
	// acquire other_rq->lock subclass 0
	// acquire rq->lock subclass 1

	spin_unlock(other_rq->lock);

leaving you with rq->lock in subclass 1.

So a subsequent double_lock_balance() call can try to nest a subclass 1
lock while already holding a subclass 1 lock.

Fix this by introducing double_unlock_balance() which releases the other
rq's lock, but also re-sets the subclass for this rq's lock to 0.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 64aa348edc
commit 1b12bbc747
2 changed files with 14 additions and 5 deletions
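For context, the subclass drift described above comes out of the existing
double_lock_balance(). A minimal sketch of that function, abridged from the
pre-patch kernel/sched.c (the irqs-disabled sanity check is elided here):

	static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
	{
		int ret = 0;

		if (unlikely(!spin_trylock(&busiest->lock))) {
			if (busiest < this_rq) {
				/* Respect the rq address order: drop and retake. */
				spin_unlock(&this_rq->lock);
				spin_lock(&busiest->lock);	/* subclass 0 */
				spin_lock_nested(&this_rq->lock,
						 SINGLE_DEPTH_NESTING);	/* subclass 1 */
				ret = 1;	/* caller's rq->lock was dropped */
			} else
				spin_lock_nested(&busiest->lock,
						 SINGLE_DEPTH_NESTING);
		}
		return ret;
	}

After the busiest < this_rq path, this_rq->lock is held at subclass 1; a plain
spin_unlock(&busiest->lock) would leave it there, which is what the new
double_unlock_balance() below corrects with lock_set_subclass().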
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2812,6 +2812,13 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
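The new helper is meant to pair with double_lock_balance() at every balancing
call site; the remaining hunks perform exactly that substitution. A hypothetical
call-site shape, for illustration only (not a line from this patch):

	spin_lock(&this_rq->lock);		/* this_rq at subclass 0 */
	if (double_lock_balance(this_rq, busiest)) {
		/* this_rq->lock was dropped and re-acquired; re-check state */
	}
	/* ... pull or push tasks between the two runqueues ... */
	double_unlock_balance(this_rq, busiest);
	/* busiest->lock dropped; this_rq->lock back at subclass 0 */
	spin_unlock(&this_rq->lock);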
@@ -3636,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3751,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
 
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 		}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;