Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm

* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slab: fix cache_cache bootstrap in kmem_cache_init()
  count_partial() is not used if !SLUB_DEBUG and !CONFIG_SLABINFO
commit 5254149f6c
2 changed files with 4 additions and 2 deletions
mm/slab.c

@@ -1481,7 +1481,7 @@ void __init kmem_cache_init(void)
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
 
 	/*
 	 * struct kmem_cache size depends on nr_node_ids, which
@@ -1602,7 +1602,7 @@ void __init kmem_cache_init(void)
 		int nid;
 
 		for_each_online_node(nid) {
-			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
 
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
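The slab.c hunks above switch the bootstrap node lists from one shared slot to one slot per node (CACHE_CACHE + node / CACHE_CACHE + nid), so each online NUMA node initializes its own entry in initkmem_list3 instead of every node aliasing slot 0. Below is a minimal standalone sketch of that indexing scheme, assuming simplified, hypothetical names (MAX_NODES, bootstrap_lists) rather than the kernel's real structures:

#include <stdio.h>

#define MAX_NODES   4      /* hypothetical stand-in for MAX_NUMNODES */
#define CACHE_CACHE 0      /* start of the per-node bootstrap slots */

struct bootstrap_list { int node; };   /* stand-in for struct kmem_list3 */

static struct bootstrap_list bootstrap_lists[3 * MAX_NODES];

int main(void)
{
	int nid;

	/* Buggy scheme: every node would use slot CACHE_CACHE (index 0).
	 * Fixed scheme: node nid gets its own slot CACHE_CACHE + nid.    */
	for (nid = 0; nid < MAX_NODES; nid++) {
		struct bootstrap_list *l = &bootstrap_lists[CACHE_CACHE + nid];
		l->node = nid;
		printf("node %d -> bootstrap slot %d\n", nid, CACHE_CACHE + nid);
	}
	return 0;
}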
mm/slub.c

@@ -2685,6 +2685,7 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+#if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO)
 static unsigned long count_partial(struct kmem_cache_node *n)
 {
 	unsigned long flags;
@@ -2697,6 +2698,7 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
+#endif
 
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
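The slub.c hunks compile count_partial() only when SLUB_DEBUG or CONFIG_SLABINFO is defined, since nothing else calls it; without the guard, a build with both options disabled would warn about a defined-but-unused static function. A generic sketch of the same guard pattern, assuming a hypothetical DEBUG_STATS option and collect_stats() helper (not kernel code):

#include <stdio.h>

/* Helper compiled only when a config option that uses it is enabled,
 * mirroring #if defined(SLUB_DEBUG) || defined(CONFIG_SLABINFO).    */
#if defined(DEBUG_STATS)
static unsigned long collect_stats(void)
{
	return 42;   /* placeholder work */
}
#endif

int main(void)
{
#if defined(DEBUG_STATS)
	printf("stats: %lu\n", collect_stats());
#else
	puts("stats disabled; collect_stats() is never compiled");
#endif
	return 0;
}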