Mirror of https://github.com/adulau/aha.git (synced 2024-12-28 03:36:19 +00:00)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: fix nr_uninterruptible accounting of frozen tasks really
  sched: fix load average accounting vs. cpu hotplug
  sched: Account for vruntime wrapping
commit 356d1b52eb

4 changed files with 19 additions and 5 deletions
@@ -209,7 +209,7 @@ extern unsigned long long time_sync_thresh;
 			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-				 (task->flags & PF_FROZEN) == 0)
+				 (task->flags & PF_FREEZING) == 0)
 
 #define __set_task_state(tsk, state_value)	\
 	do { (tsk)->state = (state_value); } while (0)
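The hunk above is the core of the first fix. A task sitting in the refrigerator sleeps in TASK_UNINTERRUPTIBLE, and excluding it via PF_FROZEN is asymmetric: that flag is cleared before the task is thawed, so a task skipped on freeze still gets subtracted from nr_uninterruptible on thaw. PF_FREEZING is managed by the freezing task itself, set before it sleeps and cleared after it leaves the refrigerator (see the kernel/freezer.c hunk further down), so the predicate evaluates the same way on both sides of the sleep. A toy userspace re-expression of the new test, for illustration only (not kernel code):

/*
 * Toy re-expression of task_contributes_to_load(). PF_FREEZING uses the
 * value added in the next hunk; TASK_UNINTERRUPTIBLE uses the kernel's
 * customary value of 2.
 */
#include <stdio.h>

#define TASK_UNINTERRUPTIBLE	2
#define PF_FREEZING		0x00004000

static int contributes_to_load(unsigned int state, unsigned int flags)
{
	return (state & TASK_UNINTERRUPTIBLE) != 0 &&
	       (flags & PF_FREEZING) == 0;
}

int main(void)
{
	/* ordinary task blocked in D state: counted in the load average */
	printf("%d\n", contributes_to_load(TASK_UNINTERRUPTIBLE, 0));
	/* task sleeping in the refrigerator: exempt while PF_FREEZING is set */
	printf("%d\n", contributes_to_load(TASK_UNINTERRUPTIBLE, PF_FREEZING));
	return 0;
}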
@@ -1680,6 +1680,7 @@ extern cputime_t task_gtime(struct task_struct *p);
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
+#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
@@ -44,12 +44,19 @@ void refrigerator(void)
 	recalc_sigpending(); /* We sent fake signal, clean it up */
 	spin_unlock_irq(&current->sighand->siglock);
 
+	/* prevent accounting of that task to load */
+	current->flags |= PF_FREEZING;
+
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!frozen(current))
 			break;
 		schedule();
 	}
+
+	/* Remove the accounting blocker */
+	current->flags &= ~PF_FREEZING;
+
 	pr_debug("%s left refrigerator\n", current->comm);
 	__set_current_state(save);
 }
@@ -7289,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static void calc_global_load_remove(struct rq *rq)
 {
 	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+	rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -7515,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		task_rq_unlock(rq, &flags);
 		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
+		rq->calc_load_update = calc_load_update;
 		break;
 
 	case CPU_ONLINE:
@@ -7525,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
-		rq->calc_load_update = calc_load_update;
-		rq->calc_load_active = 0;
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
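The three kernel/sched.c hunks above are the CPU-hotplug load-average fix: calc_global_load_remove() now also resets the per-runqueue counter after subtracting it from calc_load_tasks, and rq->calc_load_update is armed in the CPU_UP_PREPARE path rather than at CPU_ONLINE. Without the reset, a CPU that is unplugged and later brought back folds its stale calc_load_active delta into the global count. A simplified userspace model of the calc_load_active bookkeeping (kernel names reused for readability; not kernel code, and the calc_load_update timing is not modelled):

/*
 * Simplified model of per-CPU load folding. "fold" stands in for the
 * kernel's periodic accounting of a runqueue's active task count.
 */
#include <stdio.h>

static long calc_load_tasks;		/* global sum, an atomic_long_t in the kernel */

struct rq {
	long calc_load_active;		/* this CPU's last folded contribution */
};

/* Periodic fold: push the change in this CPU's active task count into the global sum. */
static void fold(struct rq *rq, long nr_active)
{
	calc_load_tasks += nr_active - rq->calc_load_active;
	rq->calc_load_active = nr_active;
}

/* CPU goes offline: drop its contribution and, with the fix, forget it too. */
static void calc_global_load_remove(struct rq *rq)
{
	calc_load_tasks -= rq->calc_load_active;
	rq->calc_load_active = 0;	/* the line added by the hunk above */
}

int main(void)
{
	struct rq cpu = { 0 };

	fold(&cpu, 3);			/* CPU contributes 3 active tasks */
	calc_global_load_remove(&cpu);	/* CPU is hot-unplugged */
	fold(&cpu, 0);			/* CPU comes back online with no tasks */

	/* Prints 0. Without the reset, the stale value 3 would be
	 * subtracted a second time and the global count would go negative. */
	printf("calc_load_tasks = %ld\n", calc_load_tasks);
	return 0;
}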
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 	return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+				struct sched_entity *b)
+{
+	return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return se->vruntime - cfs_rq->min_vruntime;
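entity_before() is the heart of the vruntime-wrapping fix used by the two hunks that follow: vruntime is an unsigned 64-bit counter that can eventually wrap, and a raw < comparison inverts the ordering once one value has wrapped while the other has not. Comparing the signed difference stays correct as long as the two values are within 2^63 of each other. A small standalone illustration (plain C with <stdint.h> types, not kernel code):

/* Demonstration of the wrap-safe ordering trick used by entity_before(). */
#include <stdint.h>
#include <stdio.h>

static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;	/* same comparison as entity_before() */
}

int main(void)
{
	uint64_t a = UINT64_MAX - 5;	/* just about to wrap */
	uint64_t b = 10;		/* already wrapped past zero */

	printf("raw a < b    : %d\n", (int)(a < b));	/* 0: ordering looks inverted */
	printf("before(a, b) : %d\n", before(a, b));	/* 1: a still sorts first */
	return 0;
}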
@@ -1017,7 +1023,7 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Already in the rightmost position?
 	 */
-	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+	if (unlikely(!rightmost || entity_before(rightmost, se)))
 		return;
 
 	/*
@@ -1713,7 +1719,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
 	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && curr->vruntime < se->vruntime) {
+			curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.