sched: Discard some old bits

WAKEUP_RUNNING was an experiment, not sure why that ever ended up being
merged...

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 6cecd084d0
parent 3a7e73a2e2
Author:    Peter Zijlstra
Committer: Ingo Molnar
Date:      2009-11-30 13:00:37 +01:00

5 changed files with 7 additions and 21 deletions

diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h

@@ -1152,8 +1152,6 @@ struct sched_entity {
 	u64			start_runtime;
 	u64			avg_wakeup;
 
-	u64			avg_running;
-
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;

diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -2493,7 +2493,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
 	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
-	p->se.avg_running		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
@@ -5379,13 +5378,14 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *p)
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
-
-	update_avg(&p->se.avg_running, runtime);
-
-	if (p->state == TASK_RUNNING) {
+	if (prev->state == TASK_RUNNING) {
+		u64 runtime = prev->se.sum_exec_runtime;
+
+		runtime -= prev->se.prev_sum_exec_runtime;
+		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+
 		/*
 		 * In order to avoid avg_overlap growing stale when we are
 		 * indeed overlapping and hence not getting put to sleep, grow
@@ -5395,12 +5395,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
 		 * correlates to the amount of cache footprint a task can
 		 * build up.
 		 */
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-		update_avg(&p->se.avg_overlap, runtime);
-	} else {
-		update_avg(&p->se.avg_running, 0);
+		update_avg(&prev->se.avg_overlap, runtime);
 	}
-	p->sched_class->put_prev_task(rq, p);
+	prev->sched_class->put_prev_task(rq, prev);
 }
 
 /*
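For context on the branch that survives: update_avg() in kernel/sched.c is a 1/8-weight exponential moving average, so avg_overlap decays toward the clamped preemption runtime. A minimal user-space sketch of that update, assuming a 0.5 ms sysctl_sched_migration_cost and illustrative sample runtimes (neither value is from this commit):

    #include <stdint.h>
    #include <stdio.h>

    static const uint64_t sysctl_sched_migration_cost = 500000ULL; /* 0.5 ms in ns (assumed) */

    /* Same shape as the kernel helper: avg += (sample - avg) / 8,
     * relying on arithmetic right shift of the signed difference. */
    static void update_avg(uint64_t *avg, uint64_t sample)
    {
            int64_t diff = (int64_t)(sample - *avg);
            *avg += diff >> 3;
    }

    int main(void)
    {
            uint64_t avg_overlap = 0;
            uint64_t runtimes[] = { 100000, 2000000, 300000 }; /* ns run before being preempted */

            for (int i = 0; i < 3; i++) {
                    uint64_t runtime = runtimes[i];

                    /* the clamp from the hunk above: one long slice
                     * must not blow up the average */
                    if (runtime > 2 * sysctl_sched_migration_cost)
                            runtime = 2 * sysctl_sched_migration_cost;
                    update_avg(&avg_overlap, runtime);
                    printf("avg_overlap = %llu ns\n", (unsigned long long)avg_overlap);
            }
            return 0;
    }

Per the comment retained in the hunk, the 2*sysctl_sched_migration_cost clamp bounds how much cache-footprint credit a single long run can contribute to avg_overlap.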

diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c

@@ -399,7 +399,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.sum_exec_runtime);
 	PN(se.avg_overlap);
 	PN(se.avg_wakeup);
-	PN(se.avg_running);
 
 	nr_switches = p->nvcsw + p->nivcsw;
 
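PN() in kernel/sched_debug.c stringizes the member name and prints its nanosecond value split into a fixed-point decimal, which is why dropping the field also drops one /proc output line. A reduced stand-alone sketch of that pattern; the struct, the pointer name p, and the field values here are illustrative assumptions:

    #include <stdint.h>
    #include <stdio.h>

    struct se_stats {
            uint64_t sum_exec_runtime;
            uint64_t avg_overlap;
            uint64_t avg_wakeup;
    };

    /* Print "<name>: <ms>.<ns remainder>" from a nanosecond counter;
     * like the kernel macro, this assumes a pointer p in scope. */
    #define PN(F) printf("%-35s: %14llu.%06llu\n", #F,          \
                    (unsigned long long)((p)->F) / 1000000ULL,  \
                    (unsigned long long)((p)->F) % 1000000ULL)

    int main(void)
    {
            struct se_stats s = { 123456789, 250000, 4000000 };
            struct se_stats *p = &s;

            PN(sum_exec_runtime);   /* prints 123.456789 */
            PN(avg_overlap);
            PN(avg_wakeup);
            return 0;
    }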

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c

@@ -1689,9 +1689,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	    pse->avg_overlap < sysctl_sched_migration_cost)
 		goto preempt;
 
-	if (sched_feat(WAKEUP_RUNNING) && pse->avg_running < se->avg_running)
-		goto preempt;
-
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
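What the deleted lines did: under the WAKEUP_RUNNING feature, the waking task (pse) preempted whenever its average running time was shorter than the current task's (se), favoring historically short-running tasks. A stand-alone sketch of just that comparison, with hypothetical averages:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sched_ent { uint64_t avg_running; };

    /* The removed heuristic: preempt if the waker tends to run shorter. */
    static bool wakeup_running_preempt(const struct sched_ent *se,
                                       const struct sched_ent *pse)
    {
            return pse->avg_running < se->avg_running;
    }

    int main(void)
    {
            struct sched_ent curr   = { .avg_running = 4000000 }; /* 4 ms average */
            struct sched_ent waking = { .avg_running = 100000 };  /* 0.1 ms average */

            printf("preempt current task: %s\n",
                   wakeup_running_preempt(&curr, &waking) ? "yes" : "no");
            return 0;
    }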

diff --git a/kernel/sched_features.h b/kernel/sched_features.h
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h

@@ -53,11 +53,6 @@ SCHED_FEAT(WAKEUP_SYNC, 0)
  */
SCHED_FEAT(WAKEUP_OVERLAP, 0)
 
-/*
- * Wakeup preemption towards tasks that run short
- */
-SCHED_FEAT(WAKEUP_RUNNING, 0)
-
 /*
  * Use the SYNC wakeup hint, pipes and the likes use this to indicate
  * the remote end is likely to consume the data we just wrote, and
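For reference, sched_features.h is consumed as an x-macro list: kernel/sched.c redefines SCHED_FEAT() before including it, once to build an enum of bit indices and once to build the default bitmask that sched_feat() tests. A condensed stand-alone sketch of that pattern; the feature list and defaults here are illustrative, not the kernel's full set:

    #include <stdio.h>

    #define SCHED_FEATURES(F)          \
            F(WAKEUP_OVERLAP, 0)       \
            F(WAKEUP_PREEMPT, 1)

    /* pass 1: an enum assigns each feature a bit index */
    #define SCHED_FEAT_ENUM(name, enabled) __SCHED_FEAT_##name,
    enum { SCHED_FEATURES(SCHED_FEAT_ENUM) __SCHED_FEAT_NR };

    /* pass 2: OR together the default-enabled bits */
    #define SCHED_FEAT_DEFAULT(name, enabled) \
            ((enabled) ? 1UL << __SCHED_FEAT_##name : 0) |
    static const unsigned long sysctl_sched_features =
            SCHED_FEATURES(SCHED_FEAT_DEFAULT) 0UL;

    /* the test the scheduler uses at runtime */
    #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

    int main(void)
    {
            printf("WAKEUP_OVERLAP: %d\n", !!sched_feat(WAKEUP_OVERLAP));
            printf("WAKEUP_PREEMPT: %d\n", !!sched_feat(WAKEUP_PREEMPT));
            return 0;
    }

Because every expansion is driven by the same list, deleting the single SCHED_FEAT(WAKEUP_RUNNING, 0) line removes the feature's bit, its default, and its name in one place.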