sched: add avg-overlap support to RT tasks
We have the notion of tracking process coupling (a.k.a. buddy-wake) via the p->se.last_wakeup / p->se.avg_overlap facilities, but it is only used for CFS-to-CFS interactions. There is no reason why an RT-to-CFS interaction cannot establish a relationship in a similar manner. Because PREEMPT_RT runs many kernel threads at FIFO priority, we often have heavy interaction between RT threads waking CFS applications. This patch offers a substantial boost (50-60%+) in performance under those circumstances.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Cc: npiggin@suse.de
Cc: rostedt@goodmis.org
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c4acb2c066
commit 2087a1ad82

2 changed files with 16 additions and 19 deletions
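
The core helper this patch moves into the generic scheduler code is update_avg(): an exponentially weighted moving average in which each new sample pulls the average by 1/8 of the difference (diff >> 3). Below is a minimal user-space sketch of that arithmetic; the main() driver and the constant 8000ns sample are illustrative only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Same arithmetic as the kernel helper in the diff below: each sample
 * moves the average by 1/8 of its distance from the current value, so
 * old history decays by a factor of 7/8 per sample. */
static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;
        *avg += diff >> 3;
}

int main(void)
{
        u64 avg = 0;
        int i;

        /* Feed a constant 8000ns "overlap" sample; the average converges
         * toward 8000 geometrically (1000, 1875, 2640, ...). */
        for (i = 0; i < 32; i++)
                update_avg(&avg, 8000);

        printf("avg_overlap after 32 samples: %llu ns\n",
               (unsigned long long)avg);
        return 0;
}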
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1693,6 +1693,12 @@ static void set_load_weight(struct task_struct *p)
 	p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
 }
 
+static void update_avg(u64 *avg, u64 sample)
+{
+	s64 diff = sample - *avg;
+	*avg += diff >> 3;
+}
+
 static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	sched_info_queued(p);
@@ -1702,6 +1708,12 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
+	if (sleep && p->se.last_wakeup) {
+		update_avg(&p->se.avg_overlap,
+			   p->se.sum_exec_runtime - p->se.last_wakeup);
+		p->se.last_wakeup = 0;
+	}
+
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
 }
@@ -2313,6 +2325,8 @@ out_running:
 		p->sched_class->task_wake_up(rq, p);
 #endif
 out:
+	current->se.last_wakeup = current->se.sum_exec_runtime;
+
 	task_rq_unlock(rq, &flags);
 
 	return success;
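Taken together, the kernel/sched.c hunks above implement the measurement: try_to_wake_up() stamps the waking task's last_wakeup with its current sum_exec_runtime, and when that waker later dequeues to sleep, the CPU time it consumed in between becomes one avg_overlap sample. A small value means the waker sleeps almost immediately after waking someone, i.e. a tightly coupled buddy pair. A hypothetical stand-alone model of that bookkeeping follows; field and function names mirror the kernel's, but the driver scenario and its values are invented.

#include <stdint.h>
#include <stdio.h>

struct entity {
        uint64_t sum_exec_runtime;      /* total CPU time consumed (ns) */
        uint64_t last_wakeup;           /* stamped when this task wakes another */
        uint64_t avg_overlap;           /* EWMA of run time between wake and sleep */
};

static void update_avg(uint64_t *avg, uint64_t sample)
{
        int64_t diff = sample - *avg;
        *avg += diff >> 3;
}

/* Mirrors the try_to_wake_up() hunk: the *waker* stamps itself. */
static void on_wake_other(struct entity *waker)
{
        waker->last_wakeup = waker->sum_exec_runtime;
}

/* Mirrors the dequeue_task() hunk: the sample is taken when the waker
 * itself goes to sleep, then the stamp is cleared. */
static void on_sleep(struct entity *e)
{
        if (e->last_wakeup) {
                update_avg(&e->avg_overlap,
                           e->sum_exec_runtime - e->last_wakeup);
                e->last_wakeup = 0;
        }
}

int main(void)
{
        struct entity waker = { 0, 0, 0 };

        /* Run 50us, wake a consumer, run only 2us more, then sleep:
         * the overlap sample is just the 2us after the wakeup. */
        waker.sum_exec_runtime += 50000;
        on_wake_other(&waker);
        waker.sum_exec_runtime += 2000;
        on_sleep(&waker);

        printf("avg_overlap: %llu ns\n", (unsigned long long)waker.avg_overlap);
        return 0;
}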
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -726,21 +726,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	__enqueue_entity(cfs_rq, se);
 }
 
-static void update_avg(u64 *avg, u64 sample)
-{
-	s64 diff = sample - *avg;
-	*avg += diff >> 3;
-}
-
-static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	if (!se->last_wakeup)
-		return;
-
-	update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
-	se->last_wakeup = 0;
-}
-
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -751,7 +736,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		update_avg_stats(cfs_rq, se);
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -1196,9 +1180,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && balanced && curr->sched_class == &fair_sched_class) {
+	if (sync && balanced) {
 		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
-				p->se.avg_overlap < sysctl_sched_migration_cost)
+		    p->se.avg_overlap < sysctl_sched_migration_cost)
 			return 1;
 	}
 
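wake_affine() above is the consumer of this average, and dropping the curr->sched_class == &fair_sched_class test is what lets an RT waker's history count: on a synchronous, balanced wakeup, the woken task is pulled onto the waker's CPU when both tasks historically overlap for less than the migration cost. A condensed, illustrative form of that test (stand-alone sketch, not the kernel's actual signature):

#include <stdint.h>

/* Affine the wakee to the waker's CPU only when both the current
 * (waking) task and the woken task p have an average overlap below
 * sysctl_sched_migration_cost; with the sched_class check removed,
 * this now applies to RT wakers too. */
static inline int overlap_says_affine(uint64_t curr_avg_overlap,
                                      uint64_t p_avg_overlap,
                                      uint64_t migration_cost)
{
        return curr_avg_overlap < migration_cost &&
               p_avg_overlap < migration_cost;
}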
@@ -1359,7 +1343,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		return;
 	}
 
-	se->last_wakeup = se->sum_exec_runtime;
 	if (unlikely(se == pse))
 		return;
 