sched: extract walk_tg_tree()
Extract walk_tg_tree() and make it a little more generic so we can use it in the schedulability test.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 0b148fa048
commit eb755805f2

1 changed file with 55 additions and 42 deletions
@@ -1387,6 +1387,51 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 	update_load_sub(&rq->load, load);
 }
 
+#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED))
+typedef int (*tg_visitor)(struct task_group *, void *);
+
+/*
+ * Iterate the full tree, calling @down when first entering a node and @up when
+ * leaving it for the final time.
+ */
+static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
+{
+	struct task_group *parent, *child;
+	int ret;
+
+	rcu_read_lock();
+	parent = &root_task_group;
+down:
+	ret = (*down)(parent, data);
+	if (ret)
+		goto out_unlock;
+	list_for_each_entry_rcu(child, &parent->children, siblings) {
+		parent = child;
+		goto down;
+
+up:
+		continue;
+	}
+	ret = (*up)(parent, data);
+	if (ret)
+		goto out_unlock;
+
+	child = parent;
+	parent = parent->parent;
+	if (parent)
+		goto up;
+out_unlock:
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static int tg_nop(struct task_group *tg, void *data)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_SMP
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
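Note: the new walk_tg_tree() is an iterative depth-first walk. @down runs when a group is first entered (pre-order), @up when it is left for the final time (post-order), and a nonzero return from either visitor aborts the walk and propagates the error, which is the hook the schedulability test can use. For illustration, here is a minimal user-space sketch of the same traversal shape; struct node, walk_tree() and the print visitors are hypothetical stand-ins, using explicit first_child/next_sibling pointers in place of the kernel's RCU lists and gotos:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's task_group tree. */
struct node {
	const char *name;
	struct node *parent;
	struct node *first_child;
	struct node *next_sibling;
};

typedef int (*visitor)(struct node *, void *);

/*
 * Same contract as walk_tg_tree(): call @down when first entering a
 * node and @up when leaving it for the final time; a nonzero return
 * from either visitor aborts the walk.
 */
static int walk_tree(visitor down, visitor up, struct node *root, void *data)
{
	struct node *n = root;
	int ret;

	for (;;) {
		ret = down(n, data);
		if (ret)
			return ret;
		if (n->first_child) {		/* descend first */
			n = n->first_child;
			continue;
		}
		for (;;) {			/* leaf: walk back up */
			ret = up(n, data);
			if (ret)
				return ret;
			if (n == root)
				return 0;
			if (n->next_sibling) {	/* resume with next sibling */
				n = n->next_sibling;
				break;
			}
			n = n->parent;
		}
	}
}

static int print_down(struct node *n, void *data)
{
	printf("enter %s\n", n->name);
	return 0;
}

static int print_up(struct node *n, void *data)
{
	printf("leave %s\n", n->name);
	return 0;
}

int main(void)
{
	struct node root = { "root", NULL,  NULL, NULL };
	struct node a    = { "a",    &root, NULL, NULL };
	struct node a1   = { "a1",   &a,    NULL, NULL };
	struct node b    = { "b",    &root, NULL, NULL };

	root.first_child = &a;
	a.first_child    = &a1;
	a.next_sibling   = &b;

	/* Prints matched enter/leave lines in pre-/post-order. */
	return walk_tree(print_down, print_up, &root, NULL);
}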
@@ -1404,37 +1449,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
-
-/*
- * Iterate the full tree, calling @down when first entering a node and @up when
- * leaving it for the final time.
- */
-static void
-walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
-{
-	struct task_group *parent, *child;
-
-	rcu_read_lock();
-	parent = &root_task_group;
-down:
-	(*down)(parent, cpu, sd);
-	list_for_each_entry_rcu(child, &parent->children, siblings) {
-		parent = child;
-		goto down;
-
-up:
-		continue;
-	}
-	(*up)(parent, cpu, sd);
-
-	child = parent;
-	parent = parent->parent;
-	if (parent)
-		goto up;
-	rcu_read_unlock();
-}
-
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
 /*
@@ -1493,11 +1507,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
  * This needs to be done in a bottom-up fashion because the rq weight of a
  * parent group depends on the shares of its child groups.
  */
-static void
-tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long rq_weight = 0;
 	unsigned long shares = 0;
+	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
@@ -1522,6 +1536,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 		__update_group_shares_cpu(tg, i, shares, rq_weight);
 		spin_unlock_irqrestore(&rq->lock, flags);
 	}
+
+	return 0;
 }
 
 /*
@@ -1529,10 +1545,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
  * This needs to be done in a top-down fashion because the load of a child
  * group is a fraction of its parents load.
  */
-static void
-tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_load_down(struct task_group *tg, void *data)
 {
 	unsigned long load;
+	long cpu = (long)data;
 
 	if (!tg->parent) {
 		load = cpu_rq(cpu)->load.weight;
@@ -1543,11 +1559,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
-}
 
-static void
-tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
+	return 0;
 }
 
 static void update_shares(struct sched_domain *sd)
@@ -1557,7 +1570,7 @@ static void update_shares(struct sched_domain *sd)
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 		sd->last_update = now;
-		walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+		walk_tg_tree(tg_nop, tg_shares_up, sd);
 	}
 }
 
@@ -1568,9 +1581,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 		spin_lock(&rq->lock);
 }
 
-static void update_h_load(int cpu)
+static void update_h_load(long cpu)
 {
-	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
 #else
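With the generic signature, per-call context now travels through the void * argument: tg_shares_up() recovers its struct sched_domain * by plain assignment (void * converts implicitly in C), while update_h_load() smuggles the cpu number through the pointer via the (void *)cpu and (long)data casts above. A small stand-alone sketch of that integer-through-pointer idiom; apply() and show_cpu() are illustrative stand-ins, not kernel API:

#include <stdio.h>

typedef int (*visitor)(void *);

/* Stand-in for a callback-taking API such as walk_tg_tree(). */
static int apply(visitor v, void *data)
{
	return v(data);
}

static int show_cpu(void *data)
{
	long cpu = (long)data;		/* unpack the integer payload */

	printf("cpu=%ld\n", cpu);
	return 0;
}

int main(void)
{
	long cpu = 3;

	/* Pack the integer as a pointer, as update_h_load() does. */
	return apply(show_cpu, (void *)cpu);
}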