[PATCH] sched: revert "filter affine wakeups"
Revert commit d7102e95b7: "[PATCH] sched: filter affine wakeups".

Apparently caused more than 10% performance regression for the aim7 benchmark. The setup in use is a 16-cpu HP rx8620, 64Gb of memory and 12 MSA1000s with 144 disks. Each disk is 72Gb with a single ext3 filesystem (courtesy of HP, who supplied benchmark results).

The problem is, for aim7, the wake-up pattern is random, but it still needs load balancing action in the wake-up path to achieve best performance. With the above commit, lack of load balancing hurts that workload.

However, for workloads like database transaction processing, the requirement is exactly the opposite. In the wake-up path, best performance is achieved with absolutely zero load balancing: we simply wake up the process on the CPU it previously ran on. Worst performance is obtained when we do load balancing at wake-up.

There isn't an easy way to auto-detect the workload characteristics. Ingo's earlier patch, which detects an idle CPU and decides whether to load balance or not, doesn't perform with aim7 either, since all CPUs are busy (it causes an even bigger perf. regression).

Revert commit d7102e95b7, which causes more than 10% performance regression with aim7.

Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit d6077cb80c
parent f822566165
2 changed files with 2 additions and 13 deletions
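For readers who want the reverted heuristic in front of them before reading the diff, below is a minimal user-space sketch, not kernel code: struct task, wake_up_filtered, and wake_up_reverted are made-up stand-ins for task_struct and try_to_wake_up, and "pull to the waking CPU" stands in for the real imbalance heuristics. It only illustrates the point made in the message above: with the filter, a task woken by a different CPU each time is never considered for wake-up balancing; with the revert, every wake-up is.

#include <stdio.h>

/* Toy model of the reverted heuristic (not kernel code).
 * "struct task" stands in for task_struct; the two functions for
 * the two versions of try_to_wake_up. */
struct task {
	int cpu;            /* CPU the task last ran on        */
	int last_waker_cpu; /* CPU that last woke this task up */
};

/* With the filter (the patch being reverted): only consider moving the
 * task to the waking CPU when that CPU also performed the previous
 * wake-up. A "random waker" therefore never triggers balancing. */
static int wake_up_filtered(struct task *t, int this_cpu)
{
	int target = t->cpu; /* default: stay put (cache-affine) */

	if (t->last_waker_cpu == this_cpu)
		target = this_cpu; /* simplification: real code runs imbalance checks here */

	t->last_waker_cpu = this_cpu;
	t->cpu = target;
	return target;
}

/* After the revert: every wake-up goes through the balancing path,
 * which is what aim7's random wake-up pattern needs. */
static int wake_up_reverted(struct task *t, int this_cpu)
{
	t->cpu = this_cpu; /* simplification: real code may or may not move the task */
	return t->cpu;
}

int main(void)
{
	struct task t = { .cpu = 0, .last_waker_cpu = 0 };

	/* aim7-like pattern: a different CPU wakes the task each time.
	 * Filtered: the task stays parked on CPU 0 (no balancing).
	 * Reverted: each wake-up may move it. */
	for (int waker = 1; waker <= 3; waker++)
		printf("filtered: waker %d -> task runs on CPU %d\n",
		       waker, wake_up_filtered(&t, waker));

	t = (struct task){ .cpu = 0, .last_waker_cpu = 0 };
	for (int waker = 1; waker <= 3; waker++)
		printf("reverted: waker %d -> task runs on CPU %d\n",
		       waker, wake_up_reverted(&t, waker));
	return 0;
}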
include/linux/sched.h
@@ -697,11 +697,8 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#if defined(CONFIG_SMP)
-	int last_waker_cpu;	/* CPU that last woke this task up */
-#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	int oncpu;
 #endif
-#endif
 	int prio, static_prio;
 	struct list_head run_list;

kernel/sched.c
@@ -1204,9 +1204,6 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 		}
 	}
 
-	if (p->last_waker_cpu != this_cpu)
-		goto out_set_cpu;
-
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
 		goto out_set_cpu;
 
@@ -1277,8 +1274,6 @@ out_set_cpu:
 		cpu = task_cpu(p);
 	}
 
-	p->last_waker_cpu = this_cpu;
-
 out_activate:
 #endif /* CONFIG_SMP */
 	if (old_state == TASK_UNINTERRUPTIBLE) {
@@ -1360,12 +1355,9 @@ void fastcall sched_fork(task_t *p, int clone_flags)
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP)
-	p->last_waker_cpu = cpu;
-#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
 #endif
-#endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
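A side note on the sched.h and sched_fork() hunks: once last_waker_cpu is gone, nothing needs CONFIG_SMP on its own any more, so the nested guards fold back into a single combined condition around oncpu. The stand-alone snippet below (ordinary C with the two macros defined locally just for the demo, not kernel code; struct task_before and struct task_after are illustrative names) shows the two layouts side by side.

#include <stdio.h>

/* Defined locally only so the example compiles stand-alone. */
#define CONFIG_SMP 1
#define __ARCH_WANT_UNLOCKED_CTXSW 1

struct task_before {          /* field layout with "filter affine wakeups" applied */
#if defined(CONFIG_SMP)
	int last_waker_cpu;   /* CPU that last woke this task up */
#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
	int oncpu;
#endif
#endif
	int prio;
};

struct task_after {           /* field layout after this revert */
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	int oncpu;
#endif
	int prio;
};

int main(void)
{
	/* oncpu is guarded identically in both layouts; only last_waker_cpu
	 * (and its extra CONFIG_SMP nesting level) disappears with the revert. */
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct task_before), sizeof(struct task_after));
	return 0;
}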