From 58d6c2d72f8628f39e8689fbde8aa177fcf00a37 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Sat, 19 Apr 2008 19:45:00 +0200
Subject: [PATCH] sched: rt-group: optimize dequeue_rt_stack

Now that the group hierarchy can have an arbitrary depth, the O(n^2)
nature of RT task dequeues will really hurt. Optimize this by providing
space to store the tree path, so we can walk it the other way.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 include/linux/sched.h |    1 +
 kernel/sched_rt.c     |   27 +++++++++++----------------
 2 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0a32059e6ed..887f5db8942 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1005,6 +1005,7 @@ struct sched_rt_entity {
 	unsigned long timeout;
 	int nr_cpus_allowed;
 
+	struct sched_rt_entity *back;
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity *parent;
 	/* rq on which this entity is (to be) queued: */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 736fb8fd897..c2730a5a4f0 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -479,26 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 /*
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
  */
 static void dequeue_rt_stack(struct task_struct *p)
 {
-	struct sched_rt_entity *rt_se, *top_se;
+	struct sched_rt_entity *rt_se, *back = NULL;
 
-	/*
-	 * dequeue all, top - down.
-	 */
-	do {
-		rt_se = &p->rt;
-		top_se = NULL;
-		for_each_sched_rt_entity(rt_se) {
-			if (on_rt_rq(rt_se))
-				top_se = rt_se;
-		}
-		if (top_se)
-			dequeue_rt_entity(top_se);
-	} while (top_se);
+	rt_se = &p->rt;
+	for_each_sched_rt_entity(rt_se) {
+		rt_se->back = back;
+		back = rt_se;
+	}
+
+	for (rt_se = back; rt_se; rt_se = rt_se->back) {
+		if (on_rt_rq(rt_se))
+			dequeue_rt_entity(rt_se);
+	}
 }
 
 /*
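
[Editor's illustration, not part of the patch.] The back-pointer trick is
worth seeing in isolation: when nodes only carry an upward ->parent link,
one bottom-up pass can thread a scratch ->back pointer through the path,
after which the path can be replayed top-down in O(h), instead of
rescanning from the leaf once per level as the old O(h^2) loop did. Below
is a minimal, self-contained user-space sketch of that idea; struct
entity, walk_top_down() and the three-level hierarchy are made up for
illustration and merely stand in for sched_rt_entity and
dequeue_rt_stack():

	#include <stdio.h>

	struct entity {
		const char *name;
		struct entity *parent;	/* upward link, like rt_se->parent */
		struct entity *back;	/* scratch link, like rt_se->back */
	};

	static void walk_top_down(struct entity *leaf)
	{
		struct entity *e, *back = NULL;

		/* bottom-up pass: reverse the path by threading ->back */
		for (e = leaf; e; e = e->parent) {
			e->back = back;
			back = e;
		}

		/* 'back' is now the topmost node; follow ->back downward */
		for (e = back; e; e = e->back)
			printf("dequeue %s\n", e->name);
	}

	int main(void)
	{
		struct entity root  = { "root",  NULL,   NULL };
		struct entity group = { "group", &root,  NULL };
		struct entity task  = { "task",  &group, NULL };

		walk_top_down(&task);	/* prints root, group, task */
		return 0;
	}

Keeping the scratch pointer in the entity itself, rather than in a
per-call array, costs one field but imposes no depth limit; in the kernel
this is safe because the enqueue/dequeue paths run with the runqueue lock
held, so two walks can never thread ->back concurrently.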