sched: optimize ttwu vs group scheduling
Impact: micro-optimization

We can avoid the sched domain walk on try_to_wake_up() when we know
there are no groups.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1236603381.8389.455.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
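To make the idea concrete, here is a minimal userspace sketch of the pattern the patch introduces. This is an illustration only, not the kernel code: wakeup_update(), update_shares(), lb_wakeup_update_enabled and the pointer-based child list are simplified stand-ins invented for the example; only the name root_task_group_empty() and the shape of the guard come from the diff below. When the root task group has no children, the per-domain walk on wakeup is skipped entirely:

/*
 * Minimal userspace mock-up of the pattern (illustration only, not the
 * kernel code): skip the sched-domain walk on wakeup when the root task
 * group has no child groups.
 */
#include <stdbool.h>
#include <stdio.h>

struct task_group {
	struct task_group *children;	/* NULL stands in for list_empty() */
};

struct sched_domain {
	struct sched_domain *parent;
	const char *name;
};

/* No child groups configured in this mock-up. */
static struct task_group root_task_group = { .children = NULL };

/* Same role as the helper the patch adds: true when no groups exist. */
static bool root_task_group_empty(void)
{
	return root_task_group.children == NULL;
}

/* Stand-in for the sched_feat(LB_WAKEUP_UPDATE) check. */
static bool lb_wakeup_update_enabled = true;

static void update_shares(struct sched_domain *sd)
{
	printf("walking domain %s\n", sd->name);	/* the cost we want to avoid */
}

/* Sketch of the guarded fast path in the wakeup code. */
static void wakeup_update(struct sched_domain *this_cpu_domains)
{
	if (lb_wakeup_update_enabled && !root_task_group_empty()) {
		/* Only pay for the walk when group scheduling can matter. */
		for (struct sched_domain *sd = this_cpu_domains; sd; sd = sd->parent)
			update_shares(sd);
	}
}

int main(void)
{
	struct sched_domain top  = { .parent = NULL, .name = "NODE" };
	struct sched_domain base = { .parent = &top, .name = "MC" };

	wakeup_update(&base);	/* nothing printed: no groups, walk skipped */
	return 0;
}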
commit 57310a98a3
parent 8c54436ae9
1 changed file with 15 additions and 1 deletion
@@ -331,6 +331,13 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_SMP
+static int root_task_group_empty(void)
+{
+	return list_empty(&root_task_group.children);
+}
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@ -391,6 +398,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 
 #else
 
+#ifdef CONFIG_SMP
+static int root_task_group_empty(void)
+{
+	return 1;
+}
+#endif
+
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -2318,7 +2332,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		sync = 0;
 
 #ifdef CONFIG_SMP
-	if (sched_feat(LB_WAKEUP_UPDATE)) {
+	if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
 		struct sched_domain *sd;
 
 		this_cpu = raw_smp_processor_id();