nodemask: use new node_to_cpumask_ptr function
* Use new node_to_cpumask_ptr. This creates a pointer to the cpumask
  for a given node. This definition is in mm patch:

	asm-generic-add-node_to_cpumask_ptr-macro.patch

* Use new set_cpus_allowed_ptr function.

Depends on:
	[mm-patch]: asm-generic-add-node_to_cpumask_ptr-macro.patch
	[sched-devel]: sched: add new set_cpus_allowed_ptr function
	[x86/latest]: x86: add cpus_scnprintf function

Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Greg Banks <gnb@melbourne.sgi.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b53e921ba1
commit c5f59f0833

6 changed files with 42 additions and 39 deletions
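Every hunk below follows the same pattern: an on-stack cpumask_t copy is replaced by a pointer obtained from node_to_cpumask_ptr(). The macro itself is defined in the dependency patch above, not in this commit; what follows is a minimal sketch of its assumed generic fallback form, for orientation only:

/*
 * Illustrative only -- the real definitions live in the mm patch
 * asm-generic-add-node_to_cpumask_ptr-macro.patch. Generic fallback:
 * materialize the node's cpumask once, then hand out a pointer to it.
 */
#define node_to_cpumask_ptr(v, node)					\
		cpumask_t _##v = node_to_cpumask(node);			\
		const cpumask_t *v = &_##v

/* Re-point an existing 'v' at another node, e.g. inside a loop. */
#define node_to_cpumask_ptr_next(v, node)				\
		_##v = node_to_cpumask(node)

/*
 * NUMA builds that keep a per-node cpumask table can instead resolve
 * both macros to a pointer into that table, so no cpumask_t is ever
 * copied onto the stack -- which is the point of this conversion.
 */

Note that the sketched fallback declares a hidden local, which is why the call sites below place node_to_cpumask_ptr() among the variable declarations and use node_to_cpumask_ptr_next() for reassignment inside loops.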
drivers/base/node.c

@@ -22,14 +22,15 @@ static struct sysdev_class node_class = {
 static ssize_t node_read_cpumap(struct sys_device * dev, char * buf)
 {
 	struct node *node_dev = to_node(dev);
-	cpumask_t mask = node_to_cpumask(node_dev->sysdev.id);
+	node_to_cpumask_ptr(mask, node_dev->sysdev.id);
 	int len;
 
 	/* 2004/06/03: buf currently PAGE_SIZE, need > 1 char per 4 bits. */
 	BUILD_BUG_ON(MAX_NUMNODES/4 > PAGE_SIZE/2);
 
-	len = cpumask_scnprintf(buf, PAGE_SIZE-1, mask);
-	len += sprintf(buf + len, "\n");
+	len = cpumask_scnprintf(buf, PAGE_SIZE-2, *mask);
+	buf[len++] = '\n';
+	buf[len] = '\0';
 	return len;
 }
kernel/sched.c

@@ -6448,7 +6448,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
  *
  * Should use nodemask_t.
  */
-static int find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, nodemask_t *used_nodes)
 {
 	int i, n, val, min_val, best_node = 0;
 
@@ -6462,7 +6462,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
 			continue;
 
 		/* Skip already used nodes */
-		if (test_bit(n, used_nodes))
+		if (node_isset(n, *used_nodes))
 			continue;
 
 		/* Simple min distance search */
@@ -6474,14 +6474,13 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
 		}
 	}
 
-	set_bit(best_node, used_nodes);
+	node_set(best_node, *used_nodes);
 	return best_node;
 }
 
 /**
  * sched_domain_node_span - get a cpumask for a node's sched_domain
  * @node: node whose cpumask we're constructing
- * @size: number of nodes to include in this span
  *
  * Given a node, construct a good cpumask for its sched_domain to span. It
  * should be one that prevents unnecessary balancing, but also spreads tasks
@@ -6489,22 +6488,22 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
  */
 static cpumask_t sched_domain_node_span(int node)
 {
-	DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
-	cpumask_t span, nodemask;
+	nodemask_t used_nodes;
+	cpumask_t span;
+	node_to_cpumask_ptr(nodemask, node);
 	int i;
 
 	cpus_clear(span);
-	bitmap_zero(used_nodes, MAX_NUMNODES);
+	nodes_clear(used_nodes);
 
-	nodemask = node_to_cpumask(node);
-	cpus_or(span, span, nodemask);
-	set_bit(node, used_nodes);
+	cpus_or(span, span, *nodemask);
+	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
-		int next_node = find_next_best_node(node, used_nodes);
+		int next_node = find_next_best_node(node, &used_nodes);
 
-		nodemask = node_to_cpumask(next_node);
-		cpus_or(span, span, nodemask);
+		node_to_cpumask_ptr_next(nodemask, next_node);
+		cpus_or(span, span, *nodemask);
 	}
 
 	return span;
@@ -6901,6 +6900,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		for (j = 0; j < MAX_NUMNODES; j++) {
 			cpumask_t tmp, notcovered;
 			int n = (i + j) % MAX_NUMNODES;
+			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(notcovered, covered);
 			cpus_and(tmp, notcovered, *cpu_map);
@@ -6908,8 +6908,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			if (cpus_empty(tmp))
 				break;
 
-			nodemask = node_to_cpumask(n);
-			cpus_and(tmp, tmp, nodemask);
+			cpus_and(tmp, tmp, *pnodemask);
 			if (cpus_empty(tmp))
 				continue;
 
mm/page_alloc.c

@@ -2029,6 +2029,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
+	node_to_cpumask_ptr(tmp, 0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2037,7 +2038,6 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	}
 
 	for_each_node_state(n, N_HIGH_MEMORY) {
-		cpumask_t tmp;
 
 		/* Don't want a node to appear more than once */
 		if (node_isset(n, *used_node_mask))
@@ -2050,8 +2050,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		tmp = node_to_cpumask(n);
-		if (!cpus_empty(tmp))
+		node_to_cpumask_ptr_next(tmp, n);
+		if (!cpus_empty(*tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
mm/slab.c

@@ -1160,14 +1160,13 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
+	node_to_cpumask_ptr(mask, node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
-		cpumask_t mask;
 
-		mask = node_to_cpumask(node);
 		/* cpu is dead; no one can alloc from it. */
 		nc = cachep->array[cpu];
 		cachep->array[cpu] = NULL;
@@ -1183,7 +1182,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(mask)) {
+		if (!cpus_empty(*mask)) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}
mm/vmscan.c

@@ -1647,11 +1647,10 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	cpumask_t cpumask;
+	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	cpumask = node_to_cpumask(pgdat->node_id);
-	if (!cpus_empty(cpumask))
-		set_cpus_allowed(tsk, cpumask);
+	if (!cpus_empty(*cpumask))
+		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
 	/*
@@ -1880,17 +1879,16 @@ out:
 static int __devinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
-	pg_data_t *pgdat;
-	cpumask_t mask;
 	int nid;
 
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
-			pgdat = NODE_DATA(nid);
-			mask = node_to_cpumask(pgdat->node_id);
-			if (any_online_cpu(mask) != NR_CPUS)
+			pg_data_t *pgdat = NODE_DATA(nid);
+			node_to_cpumask_ptr(mask, pgdat->node_id);
+
+			if (any_online_cpu(*mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
-				set_cpus_allowed(pgdat->kswapd, mask);
+				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
 	}
 	return NOTIFY_OK;
net/sunrpc/svc.c

@@ -301,7 +301,6 @@ static inline int
 svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 {
 	struct svc_pool_map *m = &svc_pool_map;
-	unsigned int node; /* or cpu */
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
@@ -314,16 +313,23 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 	default:
 		return 0;
 	case SVC_POOL_PERCPU:
-		node = m->pool_to[pidx];
+	{
+		unsigned int cpu = m->pool_to[pidx];
+
 		*oldmask = current->cpus_allowed;
-		set_cpus_allowed(current, cpumask_of_cpu(node));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		return 1;
+	}
 	case SVC_POOL_PERNODE:
-		node = m->pool_to[pidx];
+	{
+		unsigned int node = m->pool_to[pidx];
+		node_to_cpumask_ptr(nodecpumask, node);
+
 		*oldmask = current->cpus_allowed;
-		set_cpus_allowed(current, node_to_cpumask(node));
+		set_cpus_allowed_ptr(current, nodecpumask);
 		return 1;
 	}
+	}
 	}
 
 	/*
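The affinity calls follow the same by-reference pattern as the cpumask lookups. A hedged sketch of how the old and new entry points presumably relate, per the sched-devel dependency named in the commit message (this wrapper is illustrative, not part of this commit):

/*
 * Assumed relationship: the old by-value set_cpus_allowed() survives
 * as a thin wrapper around the new pointer-taking variant, so call
 * sites can be converted incrementally.
 */
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}

The motivation for touching all six files: with NR_CPUS=4096, a cpumask_t is 512 bytes, so each by-value call or assignment copied half a kilobyte through the stack; the pointer variants reduce that to a single pointer per call site.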