Mirror of https://github.com/adulau/aha.git (synced 2024-12-28 03:36:19 +00:00)
cpumask: reduce stack usage in SD_x_INIT initializers
* Remove empty cpumask_t (and all non-zero/non-null) variables in SD_*_INIT
  macros.  Use memset(0) to clear.  Also, don't inline the initializer
  functions to save on stack space in build_sched_domains().

* Merge change to include/linux/topology.h that uses the new
  node_to_cpumask_ptr function in the nr_cpus_node macro into this patch.

Depends on:
	[mm-patch]: asm-generic-add-node_to_cpumask_ptr-macro.patch
	[sched-devel]: sched: add new set_cpus_allowed_ptr function

Cc: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
  parent c5f59f0833
  commit 7c16ec585c

3 changed files with 256 additions and 163 deletions
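The heart of the patch, for readers skimming the diff below, is a pattern swap: instead of assigning a big SD_*_INIT compound literal directly (which tends to be materialized in a stack temporary of the caller, and build_sched_domains() does this once per domain level), a small noinline helper clears the target with memset() and then assigns it. The sketch below reproduces that shape outside the kernel tree; struct sched_domain here is a cut-down stand-in with invented fields, and only the SD_INIT()/SD_INIT_FUNC() names match the macros added in kernel/sched.c further down.

#include <string.h>

/* Cut-down stand-in for the kernel's struct sched_domain: a couple of
 * scalars plus one large array member that dominates its size, the way
 * the cpumask_t fields do in the real structure. */
struct sched_domain {
	unsigned long span[64];
	unsigned int min_interval;
	unsigned int max_interval;
};

#define SD_CPU_INIT (struct sched_domain) {	\
	.min_interval	= 1,			\
	.max_interval	= 4,			\
}

/* Old pattern: every "*sd = SD_CPU_INIT;" in the big caller typically
 * costs a sizeof(struct sched_domain) stack temporary there. */
static void init_cpu_domain_old(struct sched_domain *sd)
{
	*sd = SD_CPU_INIT;
}

/* New pattern, as in the diff: the temporary lives in a function that is
 * forced out of line, so the cost is not accumulated in the one caller.
 * "noinline" is spelled with the GCC attribute here; the kernel has its
 * own noinline macro. */
#define SD_INIT(sd, type)	sd_init_##type(sd)
#define SD_INIT_FUNC(type)					\
static __attribute__((noinline))				\
void sd_init_##type(struct sched_domain *sd)			\
{								\
	memset(sd, 0, sizeof(*sd));				\
	*sd = SD_##type##_INIT;					\
}

SD_INIT_FUNC(CPU)

int main(void)
{
	struct sched_domain sd;

	init_cpu_domain_old(&sd);	/* old style */
	SD_INIT(&sd, CPU);		/* new style: expands to sd_init_CPU(&sd) */
	return sd.min_interval == 1 ? 0 : 1;
}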
@@ -154,10 +154,6 @@ extern unsigned long node_remap_size[];

 /* sched_domains SD_NODE_INIT for NUMAQ machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 8,			\
 	.max_interval		= 32,			\
 	.busy_factor		= 32,			\
@@ -175,7 +171,6 @@ extern unsigned long node_remap_size[];
 				| SD_WAKE_BALANCE,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
-	.nr_balance_failed	= 0,			\
 }

 #ifdef CONFIG_X86_64_ACPI_NUMA
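Dropping the ".span = CPU_MASK_NONE", ".parent/.child/.groups = NULL" and "... = 0" lines above is safe for two independent reasons: the new sd_init_*() helpers in kernel/sched.c memset() the structure before assigning it, and C99 designated initializers already zero every member that is not named, which is what the "(Only non-zero and non-null fields need be specified.)" comment added further down in include/linux/topology.h spells out. A minimal standalone check of that language rule, with illustrative field names rather than the kernel's:

#include <assert.h>
#include <stddef.h>

struct sd_like {
	void *parent;
	void *child;
	unsigned long span[16];
	unsigned int min_interval;
	unsigned int max_interval;
};

int main(void)
{
	/* Only two members are named; C99 6.7.8 guarantees the rest of
	 * the object is initialized as if it had static storage
	 * duration, i.e. pointers become NULL and integers become 0. */
	struct sd_like sd = (struct sd_like) {
		.min_interval = 8,
		.max_interval = 32,
	};

	assert(sd.parent == NULL && sd.child == NULL);
	for (size_t i = 0; i < 16; i++)
		assert(sd.span[i] == 0);
	return 0;
}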
include/linux/topology.h
@@ -38,16 +38,15 @@
 #endif

 #ifndef nr_cpus_node
 #define nr_cpus_node(node)				\
 	({						\
-		cpumask_t __tmp__;			\
-		__tmp__ = node_to_cpumask(node);	\
-		cpus_weight(__tmp__);			\
+		node_to_cpumask_ptr(__tmp__, node);	\
+		cpus_weight(*__tmp__);			\
 	})
 #endif

 #define for_each_node_with_cpus(node)			\
 	for_each_online_node(node)			\
 		if (nr_cpus_node(node))

 void arch_update_cpu_topology(void);
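The rewritten nr_cpus_node() above relies on node_to_cpumask_ptr(), which comes from the asm-generic-add-node_to_cpumask_ptr-macro.patch dependency named in the commit message: it declares a pointer to a node's cpumask instead of copying a whole cpumask_t into a local. The generic fallback still makes one copy, but an architecture that keeps a per-node cpumask array can hand back a pointer into that array and avoid the on-stack mask entirely. The toy below only sketches that shape with stand-in types; it is not the kernel macro.

#include <stdio.h>

/* Toy stand-ins so the macro shape compiles outside the kernel:
 * a "cpumask" is one unsigned long, and nodes 0/1 own 4 CPUs each. */
typedef struct { unsigned long bits; } cpumask_t;

static cpumask_t node_to_cpumask(int node)
{
	return (cpumask_t){ .bits = 0xfUL << (4 * node) };
}

static int cpus_weight(cpumask_t m)
{
	return __builtin_popcountl(m.bits);
}

/* Sketch of the generic fallback: declare a local copy plus a pointer to
 * it, so callers are written against the pointer form.  An arch with a
 * node_to_cpumask_map[] array could instead define the same macro as
 *	cpumask_t *v = &node_to_cpumask_map[node]
 * and skip the copy (and the stack space) entirely. */
#define node_to_cpumask_ptr(v, node)				\
	cpumask_t _##v = node_to_cpumask(node), *v = &_##v

/* The new nr_cpus_node() from the hunk above, unchanged in shape. */
#define nr_cpus_node(node)					\
	({							\
		node_to_cpumask_ptr(__tmp__, node);		\
		cpus_weight(*__tmp__);				\
	})

int main(void)
{
	printf("node 1 has %d cpus\n", nr_cpus_node(1));	/* prints 4 */
	return 0;
}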
@@ -80,7 +79,9 @@ void arch_update_cpu_topology(void);
  * by defining their own arch-specific initializer in include/asm/topology.h.
  * A definition there will automagically override these default initializers
  * and allow arch-specific performance tuning of sched_domains.
+ *
+ * (Only non-zero and non-null fields need be specified.)
  */

 #ifdef CONFIG_SCHED_SMT
 /* MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
  * so can't we drop this in favor of CONFIG_SCHED_SMT?
@@ -89,20 +90,10 @@ void arch_update_cpu_topology(void);
 /* Common values for SMT siblings */
 #ifndef SD_SIBLING_INIT
 #define SD_SIBLING_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 1,			\
 	.max_interval		= 2,			\
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 110,			\
-	.cache_nice_tries	= 0,			\
-	.busy_idx		= 0,			\
-	.idle_idx		= 0,			\
-	.newidle_idx		= 0,			\
-	.wake_idx		= 0,			\
-	.forkexec_idx		= 0,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_FORK	\
@@ -112,7 +103,6 @@ void arch_update_cpu_topology(void);
 				| SD_SHARE_CPUPOWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
-	.nr_balance_failed	= 0,			\
 }
 #endif
 #endif /* CONFIG_SCHED_SMT */
@@ -121,18 +111,12 @@ void arch_update_cpu_topology(void);
 /* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
 #ifndef SD_MC_INIT
 #define SD_MC_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 1,			\
 	.max_interval		= 4,			\
 	.busy_factor		= 64,			\
 	.imbalance_pct		= 125,			\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 2,			\
-	.idle_idx		= 0,			\
-	.newidle_idx		= 0,			\
 	.wake_idx		= 1,			\
 	.forkexec_idx		= 1,			\
 	.flags			= SD_LOAD_BALANCE	\
@@ -144,7 +128,6 @@ void arch_update_cpu_topology(void);
 				| BALANCE_FOR_MC_POWER,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
-	.nr_balance_failed	= 0,			\
 }
 #endif
 #endif /* CONFIG_SCHED_MC */
@@ -152,10 +135,6 @@ void arch_update_cpu_topology(void);
 /* Common values for CPUs */
 #ifndef SD_CPU_INIT
 #define SD_CPU_INIT (struct sched_domain) {		\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 1,			\
 	.max_interval		= 4,			\
 	.busy_factor		= 64,			\
@@ -174,16 +153,11 @@ void arch_update_cpu_topology(void);
 				| BALANCE_FOR_PKG_POWER,\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
-	.nr_balance_failed	= 0,			\
 }
 #endif

 /* sched_domains SD_ALLNODES_INIT for NUMA machines */
 #define SD_ALLNODES_INIT (struct sched_domain) {	\
-	.span			= CPU_MASK_NONE,	\
-	.parent			= NULL,			\
-	.child			= NULL,			\
-	.groups			= NULL,			\
 	.min_interval		= 64,			\
 	.max_interval		= 64*num_online_cpus(),	\
 	.busy_factor		= 128,			\
@@ -191,14 +165,10 @@ void arch_update_cpu_topology(void);
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 3,			\
 	.idle_idx		= 3,			\
-	.newidle_idx		= 0, /* unused */	\
-	.wake_idx		= 0, /* unused */	\
-	.forkexec_idx		= 0, /* unused */	\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_SERIALIZE,		\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 64,			\
-	.nr_balance_failed	= 0,			\
 }

 #ifdef CONFIG_NUMA
kernel/sched.c (368 changed lines)
@@ -1869,17 +1869,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  * find_idlest_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
+		cpumask_t *tmp)
 {
-	cpumask_t tmp;
 	unsigned long load, min_load = ULONG_MAX;
 	int idlest = -1;
 	int i;

 	/* Traverse only the allowed CPUs */
-	cpus_and(tmp, group->cpumask, p->cpus_allowed);
+	cpus_and(*tmp, group->cpumask, p->cpus_allowed);

-	for_each_cpu_mask(i, tmp) {
+	for_each_cpu_mask(i, *tmp) {
 		load = weighted_cpuload(i);

 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -1918,7 +1918,7 @@ static int sched_balance_self(int cpu, int flag)
 	}

 	while (sd) {
-		cpumask_t span;
+		cpumask_t span, tmpmask;
 		struct sched_group *group;
 		int new_cpu, weight;

@@ -1934,7 +1934,7 @@ static int sched_balance_self(int cpu, int flag)
 			continue;
 		}

-		new_cpu = find_idlest_cpu(group, t, cpu);
+		new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
 		if (new_cpu == -1 || new_cpu == cpu) {
 			/* Now try balancing at a lower domain level of cpu */
 			sd = sd->child;
@@ -2818,7 +2818,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
 		   unsigned long *imbalance, enum cpu_idle_type idle,
-		   int *sd_idle, cpumask_t *cpus, int *balance)
+		   int *sd_idle, const cpumask_t *cpus, int *balance)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -3119,7 +3119,7 @@ ret:
  */
 static struct rq *
 find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
-		   unsigned long imbalance, cpumask_t *cpus)
+		   unsigned long imbalance, const cpumask_t *cpus)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -3158,15 +3158,16 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *balance)
+			int *balance, cpumask_t *cpus)
 {
 	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
-	cpumask_t cpus = CPU_MASK_ALL;
 	unsigned long flags;

+	cpus_setall(*cpus);
+
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
 	 * sibling can pick up load irrespective of busy siblings. In this case,
@@ -3181,7 +3182,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,

 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
-				   &cpus, balance);
+				   cpus, balance);

 	if (*balance == 0)
 		goto out_balanced;
@@ -3191,7 +3192,7 @@ redo:
 		goto out_balanced;
 	}

-	busiest = find_busiest_queue(group, idle, imbalance, &cpus);
+	busiest = find_busiest_queue(group, idle, imbalance, cpus);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
@@ -3224,8 +3225,8 @@ redo:

 		/* All tasks on this runqueue were pinned by CPU affinity */
 		if (unlikely(all_pinned)) {
-			cpu_clear(cpu_of(busiest), cpus);
-			if (!cpus_empty(cpus))
+			cpu_clear(cpu_of(busiest), *cpus);
+			if (!cpus_empty(*cpus))
 				goto redo;
 			goto out_balanced;
 		}
@@ -3310,7 +3311,8 @@ out_one_pinned:
  * this_rq is locked.
  */
 static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
+		     cpumask_t *cpus)
 {
 	struct sched_group *group;
 	struct rq *busiest = NULL;
@@ -3318,7 +3320,8 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int ld_moved = 0;
 	int sd_idle = 0;
 	int all_pinned = 0;
-	cpumask_t cpus = CPU_MASK_ALL;
+
+	cpus_setall(*cpus);

 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -3333,14 +3336,13 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
-				   &sd_idle, &cpus, NULL);
+				   &sd_idle, cpus, NULL);
 	if (!group) {
 		schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
 		goto out_balanced;
 	}

-	busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
-				     &cpus);
+	busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
 		goto out_balanced;
@@ -3362,8 +3364,8 @@ redo:
 		spin_unlock(&busiest->lock);

 		if (unlikely(all_pinned)) {
-			cpu_clear(cpu_of(busiest), cpus);
-			if (!cpus_empty(cpus))
+			cpu_clear(cpu_of(busiest), *cpus);
+			if (!cpus_empty(*cpus))
 				goto redo;
 		}
 	}
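Every kernel/sched.c hunk above follows the same recipe: an on-stack cpumask_t ("tmp" in find_idlest_cpu(), "cpus = CPU_MASK_ALL" in load_balance() and load_balance_newidle()) becomes a cpumask_t * parameter that the outermost caller supplies. The reason it matters is plain arithmetic: a cpumask_t is NR_CPUS bits, so at large CPU counts a single mask is hundreds of bytes. A quick standalone check, taking NR_CPUS=4096 purely as an example value (it is a kernel config choice, not something this patch sets):

#include <stdio.h>

/* A cpumask_t is a fixed-size bitmap of NR_CPUS bits stored in unsigned
 * longs.  NR_CPUS=4096 is an assumption for illustration only. */
#define NR_CPUS		4096
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

typedef struct {
	unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
} cpumask_t;

int main(void)
{
	/* 4096 bits -> 512 bytes per mask on a 64-bit build; a few nested
	 * frames that each keep one such local add up quickly, which is
	 * why the patch threads caller-owned scratch masks through. */
	printf("sizeof(cpumask_t) = %zu bytes\n", sizeof(cpumask_t));
	printf("five nested frames with one mask each = %zu bytes\n",
	       5 * sizeof(cpumask_t));
	return 0;
}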
@@ -3397,6 +3399,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = -1;
 	unsigned long next_balance = jiffies + HZ;
+	cpumask_t tmpmask;

 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -3406,8 +3409,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq)

 		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
-			pulled_task = load_balance_newidle(this_cpu,
-							   this_rq, sd);
+			pulled_task = load_balance_newidle(this_cpu, this_rq,
+							   sd, &tmpmask);

 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
@@ -3566,6 +3569,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
+	cpumask_t tmp;

 	for_each_domain(cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3589,7 +3593,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		}

 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &balance)) {
+			if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
 				/*
 				 * We've pulled tasks over so either we're no
 				 * longer idle, or one of our SMT siblings is
@@ -4945,7 +4949,7 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 	cpuset_cpus_allowed(p, &cpus_allowed);
 	cpus_and(new_mask, new_mask, cpus_allowed);
 again:
-	retval = set_cpus_allowed(p, new_mask);
+	retval = set_cpus_allowed_ptr(p, &new_mask);

 	if (!retval) {
 		cpuset_cpus_allowed(p, &cpus_allowed);
@@ -5700,7 +5704,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
  */
 static void migrate_nr_uninterruptible(struct rq *rq_src)
 {
-	struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
+	struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
 	unsigned long flags;

 	local_irq_save(flags);
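The sched_setaffinity() hunk above (and the sched_init_smp() hunk at the end of the file) switches from set_cpus_allowed(), which takes the whole cpumask_t by value, to set_cpus_allowed_ptr(), the pointer-taking variant added by the "sched: add new set_cpus_allowed_ptr function" dependency. The toy below uses stand-in types to show how the two relate; the real prototypes live in include/linux/sched.h, and keeping the by-value name as a thin wrapper is roughly how the transition was staged.

#include <stdio.h>
#include <string.h>

/* Toy stand-ins: a big fixed-size mask and a task struct. */
typedef struct { unsigned long bits[64]; } cpumask_t;	/* 512 bytes */
struct task_struct { cpumask_t cpus_allowed; };

/* Pointer-taking primitive: no 512-byte copy is made at the call site. */
static int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
{
	p->cpus_allowed = *new_mask;
	return 0;
}

/* The old by-value entry point can then be kept as a trivial wrapper;
 * callers that care about stack and argument copies migrate to the _ptr
 * form, as the hunks above do. */
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}

int main(void)
{
	struct task_struct t;
	cpumask_t mask;

	memset(&mask, 0xff, sizeof(mask));
	set_cpus_allowed_ptr(&t, &mask);	/* new style */
	set_cpus_allowed(&t, mask);		/* old style: copies the mask */
	printf("mask size: %zu bytes\n", sizeof(cpumask_t));
	return 0;
}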
@@ -6118,14 +6122,14 @@ EXPORT_SYMBOL(nr_cpu_ids);

 #ifdef CONFIG_SCHED_DEBUG

-static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
+				  cpumask_t *groupmask)
 {
 	struct sched_group *group = sd->groups;
-	cpumask_t groupmask;
 	char str[256];

 	cpulist_scnprintf(str, sizeof(str), sd->span);
-	cpus_clear(groupmask);
+	cpus_clear(*groupmask);

 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);

@@ -6169,13 +6173,13 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
 			break;
 		}

-		if (cpus_intersects(groupmask, group->cpumask)) {
+		if (cpus_intersects(*groupmask, group->cpumask)) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}

-		cpus_or(groupmask, groupmask, group->cpumask);
+		cpus_or(*groupmask, *groupmask, group->cpumask);

 		cpulist_scnprintf(str, sizeof(str), group->cpumask);
 		printk(KERN_CONT " %s", str);
@@ -6184,10 +6188,10 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
 	} while (group != sd->groups);
 	printk(KERN_CONT "\n");

-	if (!cpus_equal(sd->span, groupmask))
+	if (!cpus_equal(sd->span, *groupmask))
 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

-	if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
+	if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
 		printk(KERN_ERR "ERROR: parent span is not a superset "
 			"of domain->span\n");
 	return 0;
@@ -6195,6 +6199,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)

 static void sched_domain_debug(struct sched_domain *sd, int cpu)
 {
+	cpumask_t *groupmask;
 	int level = 0;

 	if (!sd) {
@@ -6204,14 +6209,21 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)

 	printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);

+	groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+	if (!groupmask) {
+		printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
+		return;
+	}
+
 	for (;;) {
-		if (sched_domain_debug_one(sd, cpu, level))
+		if (sched_domain_debug_one(sd, cpu, level, groupmask))
 			break;
 		level++;
 		sd = sd->parent;
 		if (!sd)
 			break;
 	}
+
+	kfree(groupmask);
 }
 #else
 # define sched_domain_debug(sd, cpu) do { } while (0)
@@ -6399,30 +6411,33 @@ __setup("isolcpus=", isolated_cpu_setup);
  * and ->cpu_power to 0.
  */
 static void
-init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
+init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 			int (*group_fn)(int cpu, const cpumask_t *cpu_map,
-					struct sched_group **sg))
+					struct sched_group **sg,
+					cpumask_t *tmpmask),
+			cpumask_t *covered, cpumask_t *tmpmask)
 {
 	struct sched_group *first = NULL, *last = NULL;
-	cpumask_t covered = CPU_MASK_NONE;
 	int i;

-	for_each_cpu_mask(i, span) {
+	cpus_clear(*covered);
+
+	for_each_cpu_mask(i, *span) {
 		struct sched_group *sg;
-		int group = group_fn(i, cpu_map, &sg);
+		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;

-		if (cpu_isset(i, covered))
+		if (cpu_isset(i, *covered))
 			continue;

-		sg->cpumask = CPU_MASK_NONE;
+		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;

-		for_each_cpu_mask(j, span) {
-			if (group_fn(j, cpu_map, NULL) != group)
+		for_each_cpu_mask(j, *span) {
+			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;

-			cpu_set(j, covered);
+			cpu_set(j, *covered);
 			cpu_set(j, sg->cpumask);
 		}
 		if (!first)
@@ -6520,7 +6535,8 @@ static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);

 static int
-cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+		 cpumask_t *unused)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6538,19 +6554,22 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);

 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
 static int
-cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+		  cpumask_t *mask)
 {
 	int group;
-	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
-	cpus_and(mask, mask, *cpu_map);
-	group = first_cpu(mask);
+
+	*mask = per_cpu(cpu_sibling_map, cpu);
+	cpus_and(*mask, *mask, *cpu_map);
+	group = first_cpu(*mask);
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group);
 	return group;
 }
 #elif defined(CONFIG_SCHED_MC)
 static int
-cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+		  cpumask_t *unused)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_core, cpu);
@@ -6562,17 +6581,18 @@ static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_phys);

 static int
-cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+		  cpumask_t *mask)
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
-	cpumask_t mask = cpu_coregroup_map(cpu);
-	cpus_and(mask, mask, *cpu_map);
-	group = first_cpu(mask);
+	*mask = cpu_coregroup_map(cpu);
+	cpus_and(*mask, *mask, *cpu_map);
+	group = first_cpu(*mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
-	cpus_and(mask, mask, *cpu_map);
-	group = first_cpu(mask);
+	*mask = per_cpu(cpu_sibling_map, cpu);
+	cpus_and(*mask, *mask, *cpu_map);
+	group = first_cpu(*mask);
 #else
 	group = cpu;
 #endif
@@ -6594,13 +6614,13 @@ static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);

 static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
-				 struct sched_group **sg)
+				 struct sched_group **sg, cpumask_t *nodemask)
 {
-	cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
 	int group;

-	cpus_and(nodemask, nodemask, *cpu_map);
-	group = first_cpu(nodemask);
+	*nodemask = node_to_cpumask(cpu_to_node(cpu));
+	cpus_and(*nodemask, *nodemask, *cpu_map);
+	group = first_cpu(*nodemask);

 	if (sg)
 		*sg = &per_cpu(sched_group_allnodes, group);
@@ -6636,7 +6656,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)

 #ifdef CONFIG_NUMA
 /* Free memory allocated for various sched_group structures */
-static void free_sched_groups(const cpumask_t *cpu_map)
+static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;

@@ -6648,11 +6668,11 @@ static void free_sched_groups(const cpumask_t *cpu_map)
 			continue;

 		for (i = 0; i < MAX_NUMNODES; i++) {
-			cpumask_t nodemask = node_to_cpumask(i);
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];

-			cpus_and(nodemask, nodemask, *cpu_map);
-			if (cpus_empty(nodemask))
+			*nodemask = node_to_cpumask(i);
+			cpus_and(*nodemask, *nodemask, *cpu_map);
+			if (cpus_empty(*nodemask))
 				continue;

 			if (sg == NULL)
@@ -6670,7 +6690,7 @@ next_sg:
 	}
 }
 #else
-static void free_sched_groups(const cpumask_t *cpu_map)
+static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 }
 #endif
@@ -6727,6 +6747,65 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	} while (group != child->groups);
 }

+/*
+ * Initializers for schedule domains
+ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
+ */
+
+#define	SD_INIT(sd, type)	sd_init_##type(sd)
+#define SD_INIT_FUNC(type)	\
+static noinline void sd_init_##type(struct sched_domain *sd)	\
+{								\
+	memset(sd, 0, sizeof(*sd));				\
+	*sd = SD_##type##_INIT;					\
+}
+
+SD_INIT_FUNC(CPU)
+#ifdef CONFIG_NUMA
+ SD_INIT_FUNC(ALLNODES)
+ SD_INIT_FUNC(NODE)
+#endif
+#ifdef CONFIG_SCHED_SMT
+ SD_INIT_FUNC(SIBLING)
+#endif
+#ifdef CONFIG_SCHED_MC
+ SD_INIT_FUNC(MC)
+#endif
+
+/*
+ * To minimize stack usage kmalloc room for cpumasks and share the
+ * space as the usage in build_sched_domains() dictates.  Used only
+ * if the amount of space is significant.
+ */
+struct allmasks {
+	cpumask_t tmpmask;			/* make this one first */
+	union {
+		cpumask_t nodemask;
+		cpumask_t this_sibling_map;
+		cpumask_t this_core_map;
+	};
+	cpumask_t send_covered;
+
+#ifdef CONFIG_NUMA
+	cpumask_t domainspan;
+	cpumask_t covered;
+	cpumask_t notcovered;
+#endif
+};
+
+#if	NR_CPUS > 128
+#define	SCHED_CPUMASK_ALLOC		1
+#define	SCHED_CPUMASK_FREE(v)		kfree(v)
+#define	SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
+#else
+#define	SCHED_CPUMASK_ALLOC		0
+#define	SCHED_CPUMASK_FREE(v)
+#define	SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
+#endif
+
+#define	SCHED_CPUMASK_VAR(v, a)		cpumask_t *v = (cpumask_t *) \
+			((unsigned long)(a) + offsetof(struct allmasks, v))
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
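The allmasks machinery added above packs all of build_sched_domains()' scratch cpumasks into one block: kmalloc()ed when NR_CPUS > 128, a single local otherwise, with a union letting masks that are never live at the same time share space, and SCHED_CPUMASK_VAR(v, a) simply declaring a pointer at offsetof(struct allmasks, v) inside that block. Here is the same pointer-carving trick as a standalone toy, with a shortened struct and plain malloc() standing in for kmalloc():

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef struct { unsigned long bits[64]; } cpumask_t;

/* Same layout idea as the kernel's struct allmasks: a scratch mask first,
 * then a union of masks that are never live at the same time. */
struct allmasks {
	cpumask_t tmpmask;
	union {
		cpumask_t nodemask;
		cpumask_t this_sibling_map;
	};
	cpumask_t send_covered;
};

/* Declare a cpumask_t *v pointing at the field of the same name inside
 * the block 'a' -- the offsetof trick used by SCHED_CPUMASK_VAR. */
#define SCHED_CPUMASK_VAR(v, a)	cpumask_t *v = (cpumask_t *)	\
		((unsigned long)(a) + offsetof(struct allmasks, v))

int main(void)
{
	struct allmasks *allmasks = malloc(sizeof(*allmasks));

	if (!allmasks)
		return 1;

	SCHED_CPUMASK_VAR(tmpmask, allmasks);
	SCHED_CPUMASK_VAR(nodemask, allmasks);

	/* Both pointers land inside the one allocation; nothing beyond the
	 * pointers themselves is put on the stack. */
	printf("tmpmask  at offset %td\n", (char *)tmpmask - (char *)allmasks);
	printf("nodemask at offset %td\n", (char *)nodemask - (char *)allmasks);

	free(allmasks);
	return 0;
}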
@@ -6735,6 +6814,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
 	struct root_domain *rd;
+	SCHED_CPUMASK_DECLARE(allmasks);
+	cpumask_t *tmpmask;
 #ifdef CONFIG_NUMA
 	struct sched_group **sched_group_nodes = NULL;
 	int sd_allnodes = 0;
@@ -6748,38 +6829,60 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
 		return -ENOMEM;
 	}
-	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
 #endif

 	rd = alloc_rootdomain();
 	if (!rd) {
 		printk(KERN_WARNING "Cannot alloc root domain\n");
+#ifdef CONFIG_NUMA
+		kfree(sched_group_nodes);
+#endif
 		return -ENOMEM;
 	}

+#if SCHED_CPUMASK_ALLOC
+	/* get space for all scratch cpumask variables */
+	allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
+	if (!allmasks) {
+		printk(KERN_WARNING "Cannot alloc cpumask array\n");
+		kfree(rd);
+#ifdef CONFIG_NUMA
+		kfree(sched_group_nodes);
+#endif
+		return -ENOMEM;
+	}
+#endif
+	tmpmask = (cpumask_t *)allmasks;
+
+
+#ifdef CONFIG_NUMA
+	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
+#endif
+
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
 	for_each_cpu_mask(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
-		cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
+		SCHED_CPUMASK_VAR(nodemask, allmasks);

-		cpus_and(nodemask, nodemask, *cpu_map);
+		*nodemask = node_to_cpumask(cpu_to_node(i));
+		cpus_and(*nodemask, *nodemask, *cpu_map);

 #ifdef CONFIG_NUMA
 		if (cpus_weight(*cpu_map) >
-				SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
+				SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
 			sd = &per_cpu(allnodes_domains, i);
-			*sd = SD_ALLNODES_INIT;
+			SD_INIT(sd, ALLNODES);
 			sd->span = *cpu_map;
-			cpu_to_allnodes_group(i, cpu_map, &sd->groups);
+			cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
 			p = sd;
 			sd_allnodes = 1;
 		} else
 			p = NULL;

 		sd = &per_cpu(node_domains, i);
-		*sd = SD_NODE_INIT;
+		SD_INIT(sd, NODE);
 		sd->span = sched_domain_node_span(cpu_to_node(i));
 		sd->parent = p;
 		if (p)
@@ -6789,94 +6892,114 @@ static int build_sched_domains(const cpumask_t *cpu_map)

 		p = sd;
 		sd = &per_cpu(phys_domains, i);
-		*sd = SD_CPU_INIT;
-		sd->span = nodemask;
+		SD_INIT(sd, CPU);
+		sd->span = *nodemask;
 		sd->parent = p;
 		if (p)
 			p->child = sd;
-		cpu_to_phys_group(i, cpu_map, &sd->groups);
+		cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);

 #ifdef CONFIG_SCHED_MC
 		p = sd;
 		sd = &per_cpu(core_domains, i);
-		*sd = SD_MC_INIT;
+		SD_INIT(sd, MC);
 		sd->span = cpu_coregroup_map(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
-		cpu_to_core_group(i, cpu_map, &sd->groups);
+		cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
 #endif

 #ifdef CONFIG_SCHED_SMT
 		p = sd;
 		sd = &per_cpu(cpu_domains, i);
-		*sd = SD_SIBLING_INIT;
+		SD_INIT(sd, SIBLING);
 		sd->span = per_cpu(cpu_sibling_map, i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
-		cpu_to_cpu_group(i, cpu_map, &sd->groups);
+		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
 #endif
 	}

 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
 	for_each_cpu_mask(i, *cpu_map) {
-		cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
-		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
-		if (i != first_cpu(this_sibling_map))
+		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
+		SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+		*this_sibling_map = per_cpu(cpu_sibling_map, i);
+		cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
+		if (i != first_cpu(*this_sibling_map))
 			continue;

 		init_sched_build_groups(this_sibling_map, cpu_map,
-					&cpu_to_cpu_group);
+					&cpu_to_cpu_group,
+					send_covered, tmpmask);
 	}
 #endif

 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
 	for_each_cpu_mask(i, *cpu_map) {
-		cpumask_t this_core_map = cpu_coregroup_map(i);
-		cpus_and(this_core_map, this_core_map, *cpu_map);
-		if (i != first_cpu(this_core_map))
+		SCHED_CPUMASK_VAR(this_core_map, allmasks);
+		SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+		*this_core_map = cpu_coregroup_map(i);
+		cpus_and(*this_core_map, *this_core_map, *cpu_map);
+		if (i != first_cpu(*this_core_map))
 			continue;

 		init_sched_build_groups(this_core_map, cpu_map,
-					&cpu_to_core_group);
+					&cpu_to_core_group,
+					send_covered, tmpmask);
 	}
 #endif

 	/* Set up physical groups */
 	for (i = 0; i < MAX_NUMNODES; i++) {
-		cpumask_t nodemask = node_to_cpumask(i);
+		SCHED_CPUMASK_VAR(nodemask, allmasks);
+		SCHED_CPUMASK_VAR(send_covered, allmasks);

-		cpus_and(nodemask, nodemask, *cpu_map);
-		if (cpus_empty(nodemask))
+		*nodemask = node_to_cpumask(i);
+		cpus_and(*nodemask, *nodemask, *cpu_map);
+		if (cpus_empty(*nodemask))
 			continue;

-		init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
+		init_sched_build_groups(nodemask, cpu_map,
+					&cpu_to_phys_group,
+					send_covered, tmpmask);
 	}

 #ifdef CONFIG_NUMA
 	/* Set up node groups */
-	if (sd_allnodes)
-		init_sched_build_groups(*cpu_map, cpu_map,
-					&cpu_to_allnodes_group);
+	if (sd_allnodes) {
+		SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+		init_sched_build_groups(cpu_map, cpu_map,
+					&cpu_to_allnodes_group,
+					send_covered, tmpmask);
+	}

 	for (i = 0; i < MAX_NUMNODES; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
-		cpumask_t nodemask = node_to_cpumask(i);
-		cpumask_t domainspan;
-		cpumask_t covered = CPU_MASK_NONE;
+		SCHED_CPUMASK_VAR(nodemask, allmasks);
+		SCHED_CPUMASK_VAR(domainspan, allmasks);
+		SCHED_CPUMASK_VAR(covered, allmasks);
 		int j;

-		cpus_and(nodemask, nodemask, *cpu_map);
-		if (cpus_empty(nodemask)) {
+		*nodemask = node_to_cpumask(i);
+		cpus_clear(*covered);
+
+		cpus_and(*nodemask, *nodemask, *cpu_map);
+		if (cpus_empty(*nodemask)) {
 			sched_group_nodes[i] = NULL;
 			continue;
 		}

-		domainspan = sched_domain_node_span(i);
-		cpus_and(domainspan, domainspan, *cpu_map);
+		*domainspan = sched_domain_node_span(i);
+		cpus_and(*domainspan, *domainspan, *cpu_map);

 		sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
 		if (!sg) {
@@ -6885,31 +7008,31 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, nodemask) {
+		for_each_cpu_mask(j, *nodemask) {
 			struct sched_domain *sd;

 			sd = &per_cpu(node_domains, j);
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
-		sg->cpumask = nodemask;
+		sg->cpumask = *nodemask;
 		sg->next = sg;
-		cpus_or(covered, covered, nodemask);
+		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;

 		for (j = 0; j < MAX_NUMNODES; j++) {
-			cpumask_t tmp, notcovered;
+			SCHED_CPUMASK_VAR(notcovered, allmasks);
 			int n = (i + j) % MAX_NUMNODES;
 			node_to_cpumask_ptr(pnodemask, n);

-			cpus_complement(notcovered, covered);
-			cpus_and(tmp, notcovered, *cpu_map);
-			cpus_and(tmp, tmp, domainspan);
-			if (cpus_empty(tmp))
+			cpus_complement(*notcovered, *covered);
+			cpus_and(*tmpmask, *notcovered, *cpu_map);
+			cpus_and(*tmpmask, *tmpmask, *domainspan);
+			if (cpus_empty(*tmpmask))
 				break;

-			cpus_and(tmp, tmp, *pnodemask);
-			if (cpus_empty(tmp))
+			cpus_and(*tmpmask, *tmpmask, *pnodemask);
+			if (cpus_empty(*tmpmask))
 				continue;

 			sg = kmalloc_node(sizeof(struct sched_group),
@@ -6920,9 +7043,9 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 				goto error;
 			}
 			sg->__cpu_power = 0;
-			sg->cpumask = tmp;
+			sg->cpumask = *tmpmask;
 			sg->next = prev->next;
-			cpus_or(covered, covered, tmp);
+			cpus_or(*covered, *covered, *tmpmask);
 			prev->next = sg;
 			prev = sg;
 		}
@@ -6958,7 +7081,8 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 	if (sd_allnodes) {
 		struct sched_group *sg;

-		cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
+		cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg,
+							tmpmask);
 		init_numa_sched_groups_power(sg);
 	}
 #endif
@@ -6976,11 +7100,13 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		cpu_attach_domain(sd, rd, i);
 	}

+	SCHED_CPUMASK_FREE((void *)allmasks);
 	return 0;

 #ifdef CONFIG_NUMA
 error:
-	free_sched_groups(cpu_map);
+	free_sched_groups(cpu_map, tmpmask);
+	SCHED_CPUMASK_FREE((void *)allmasks);
 	return -ENOMEM;
 #endif
 }
@@ -7020,9 +7146,10 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 	return err;
 }

-static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
+static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
+				       cpumask_t *tmpmask)
 {
-	free_sched_groups(cpu_map);
+	free_sched_groups(cpu_map, tmpmask);
 }

 /*
@@ -7031,6 +7158,7 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
  */
 static void detach_destroy_domains(const cpumask_t *cpu_map)
 {
+	cpumask_t tmpmask;
 	int i;

 	unregister_sched_domain_sysctl();
@@ -7038,7 +7166,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	for_each_cpu_mask(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
-	arch_destroy_sched_domains(cpu_map);
+	arch_destroy_sched_domains(cpu_map, &tmpmask);
 }

 /*
@@ -7246,7 +7374,7 @@ void __init sched_init_smp(void)
 	hotcpu_notifier(update_sched_domains, 0);

 	/* Move init over to a non-isolated CPU */
-	if (set_cpus_allowed(current, non_isolated_cpus) < 0)
+	if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
 		BUG();
 	sched_init_granularity();
 }