sched: rt-group: optimize dequeue_rt_stack

Now that the group hierarchy can have an arbitrary depth, the O(n^2) nature
of RT task dequeues will really hurt. Optimize this by providing space to
store the tree path, so we can walk it the other way.
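The idea in isolation: below is a minimal, self-contained C sketch of the
same path-reversal trick, using illustrative names ("node", "dequeue_one",
"dequeue_stack") rather than the kernel's actual types. One bottom-up pass
threads back-pointers along the parent chain; a second pass follows them to
visit the chain top-down, instead of rescanning the chain once per dequeue.

#include <stdio.h>

struct node {
	struct node *parent;	/* up the hierarchy; NULL at the top */
	struct node *back;	/* scratch: next node down the recorded path */
	const char *name;
};

static void dequeue_one(struct node *n)
{
	printf("dequeue %s\n", n->name);	/* stand-in for dequeue_rt_entity() */
}

static void dequeue_stack(struct node *leaf)
{
	struct node *n, *back = NULL;

	/* pass 1: walk up, remembering each node's child on the path */
	for (n = leaf; n; n = n->parent) {
		n->back = back;
		back = n;
	}

	/* pass 2: 'back' is now the topmost node; walk back down */
	for (n = back; n; n = n->back)
		dequeue_one(n);
}

int main(void)
{
	struct node root  = { NULL,   NULL, "root" };
	struct node group = { &root,  NULL, "group" };
	struct node task  = { &group, NULL, "task" };

	dequeue_stack(&task);	/* prints root, group, task: top-down */
	return 0;
}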

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra, 2008-04-19 19:45:00 +02:00
Committed by Ingo Molnar
commit 58d6c2d72f
parent d19ca30874
2 changed files with 12 additions and 16 deletions

include/linux/sched.h

@@ -1005,6 +1005,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
 
+	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity *parent;
 	/* rq on which this entity is (to be) queued: */
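The new back field is the "space to store the tree path" from the
changelog: the path is recorded in the entities themselves, so no
allocation or fixed-size array is needed regardless of hierarchy depth.
It is plain scratch state, written and read only within dequeue_rt_stack()
in the hunk below.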

kernel/sched_rt.c

@@ -479,26 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 /*
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
  */
 static void dequeue_rt_stack(struct task_struct *p)
 {
-	struct sched_rt_entity *rt_se, *top_se;
+	struct sched_rt_entity *rt_se, *back = NULL;
 
-	/*
-	 * dequeue all, top - down.
-	 */
-	do {
-		rt_se = &p->rt;
-		top_se = NULL;
-		for_each_sched_rt_entity(rt_se) {
-			if (on_rt_rq(rt_se))
-				top_se = rt_se;
-		}
-		if (top_se)
-			dequeue_rt_entity(top_se);
-	} while (top_se);
+	rt_se = &p->rt;
+	for_each_sched_rt_entity(rt_se) {
+		rt_se->back = back;
+		back = rt_se;
+	}
+
+	for (rt_se = back; rt_se; rt_se = rt_se->back) {
+		if (on_rt_rq(rt_se))
+			dequeue_rt_entity(rt_se);
+	}
 }
 
 /*
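Why this is a win, worked out for a chain of depth h: the removed loop
rescans the chain from the task once per dequeue, costing
h + (h-1) + ... + 1 = h(h+1)/2 steps, which is the O(1/2 h^2) noted in the
deleted XXX comment. The replacement makes exactly one upward pass to
thread the back pointers and one downward pass to dequeue, about 2h steps,
i.e. linear in the depth of the hierarchy.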