Mirror of https://github.com/adulau/aha.git
sched: move double_unlock_balance() higher
Move double_lock_balance()/double_unlock_balance() higher to fix the following with gcc-3.4.6:

  CC      kernel/sched.o
In file included from kernel/sched.c:1605:
kernel/sched_rt.c: In function `find_lock_lowest_rq':
kernel/sched_rt.c:914: sorry, unimplemented: inlining failed in call to 'double_unlock_balance': function body not available
kernel/sched_rt.c:1077: sorry, unimplemented: called from here
make[2]: *** [kernel/sched.o] Error 1

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f1860c34b3
commit 70574a996f
2 changed files with 33 additions and 38 deletions
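The root cause is the build layout: kernel/sched_rt.c is not compiled on its own but textually #included from kernel/sched.c (line 1605 in the error output above), and at that point it only had forward declarations of double_lock_balance()/double_unlock_balance(); the definitions came roughly 1200 lines later. Because the kernel's "inline" is treated as always_inline in many configurations of that era, gcc-3.4.6 refuses to inline a call whose body it has not yet seen. A minimal, hypothetical reduction of the failure (not kernel code; helper() and caller() are made-up names):

	/*
	 * Hypothetical reduction of the gcc-3.4.6 failure (not kernel code).
	 * helper() is only forward-declared where caller() uses it; with the
	 * always_inline attribute, gcc-3.4.6 stops with
	 *   "sorry, unimplemented: inlining failed ... function body not available"
	 * because the body appears later in the translation unit.
	 */
	static inline void helper(void) __attribute__((always_inline));

	static int caller(void)
	{
		helper();		/* gcc-3.4.6: body not available yet */
		return 0;
	}

	static inline void helper(void)	/* the definition is only seen here */
	{
	}

	int main(void)
	{
		return caller();
	}

Moving the definitions above the #include, as this commit does, makes the body visible at every call site, which is also why the forward declarations in sched_rt.c can be dropped (last hunk below).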
kernel/sched.c
@@ -1581,6 +1581,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2780,40 +2813,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 		__release(rq2->lock);
 }
 
-/*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
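A note on what the moved code does: double_lock_balance() must take busiest->lock while this_rq->lock is already held, so it first tries an opportunistic spin_trylock(); if that fails and the two locks would otherwise be taken in the wrong order, it drops this_rq->lock and re-acquires both in a fixed global order (ascending runqueue address), returning 1 so the caller knows its own lock was released in between. The same ABBA-deadlock-avoidance pattern in plain userspace pthreads (hypothetical names, illustration only, built with cc -pthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	/* Lock "other" while already holding "mine"; returns 1 if "mine" was
	 * temporarily dropped to respect the global (address) lock order. */
	static int double_lock(pthread_mutex_t *mine, pthread_mutex_t *other)
	{
		int ret = 0;

		if (pthread_mutex_trylock(other) != 0) {
			if (other < mine) {			/* would invert the order */
				pthread_mutex_unlock(mine);	/* drop, then retake both */
				pthread_mutex_lock(other);
				pthread_mutex_lock(mine);
				ret = 1;			/* caller must revalidate */
			} else {
				pthread_mutex_lock(other);	/* order already correct */
			}
		}
		return ret;
	}

	int main(void)
	{
		pthread_mutex_lock(&a);
		if (double_lock(&a, &b))
			puts("lock a was dropped and re-taken");
		/* both locks held here */
		pthread_mutex_unlock(&b);
		pthread_mutex_unlock(&a);
		return 0;
	}

Because every pair of locks has one agreed order, two CPUs each holding one lock of the pair can never both block waiting for the other.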
kernel/sched_rt.c
@@ -909,10 +909,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
-static inline void double_unlock_balance(struct rq *this_rq,
-					 struct rq *busiest);
-
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
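With the definitions now sitting above the #include of kernel/sched_rt.c, the forward declarations removed in this hunk are redundant. For orientation, a standalone sketch of the calling convention that find_lock_lowest_rq() (the function named in the gcc error) relies on: a return of 1 from double_lock_balance() means this_rq->lock was dropped and re-taken, so the caller has to revalidate its earlier assumptions before using the second runqueue. Everything below except the two double_*_balance() names is a stub or placeholder, not kernel code:

	#include <stddef.h>

	struct rq { int locked; };	/* stand-in; the real struct rq is far larger */

	/* Stub with the same contract as the kernel function: lock "busiest" while
	 * "this_rq" is already locked; return 1 if this_rq's lock had to be dropped
	 * and re-acquired to respect the locking order. */
	static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
	{
		(void)this_rq;
		busiest->locked = 1;
		return 1;		/* pretend we had to drop this_rq's lock */
	}

	static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	{
		(void)this_rq;
		busiest->locked = 0;
	}

	/* Simplified shape of the caller: take the target lock, and if our own lock
	 * was dropped along the way, revalidate before using any earlier state. */
	static struct rq *find_lock_lowest_rq_sketch(struct rq *this_rq,
						     struct rq *lowest_rq,
						     int task_still_queued)
	{
		if (double_lock_balance(this_rq, lowest_rq) && !task_still_queued) {
			double_unlock_balance(this_rq, lowest_rq);
			return NULL;		/* world changed under us; give up */
		}
		return lowest_rq;		/* caller pairs with double_unlock_balance() */
	}

	int main(void)
	{
		struct rq a = { 0 }, b = { 0 };

		return find_lock_lowest_rq_sketch(&a, &b, 1) ? 0 : 1;
	}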