slub: add hooks for kmemcheck
Parts of this patch were contributed by Pekka Enberg but merged for
atomicity.

Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
parent d7002857de
commit 5a896d9e7c

1 changed file with 19 additions and 2 deletions
mm/slub.c | 21 +++++++++++++++++++--
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
@@ -147,7 +148,7 @@
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA)
+		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -1092,6 +1093,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 		stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
 	}
+
+	if (kmemcheck_enabled
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+	{
+		kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page));
+	}
+
 	page->objects = oo_objects(oo);
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
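The gate added above attaches a kmemcheck shadow page to a freshly allocated slab only when kmemcheck is enabled and the cache neither opted out via SLAB_NOTRACK nor already runs with SLUB's own debug flags (DEBUG_DEFAULT_FLAGS). A minimal sketch of that condition as a standalone predicate — the helper name is hypothetical; the patch itself open-codes the test:

	/*
	 * Illustrative only: a hypothetical predicate equivalent to the
	 * open-coded condition in allocate_slab() above.
	 */
	static inline bool slab_wants_kmemcheck_shadow(struct kmem_cache *s)
	{
		return kmemcheck_enabled &&
		       !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS));
	}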
@@ -1165,6 +1173,9 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		__ClearPageSlubDebug(page);
 	}
 
+	if (kmemcheck_page_is_tracked(page))
+		kmemcheck_free_shadow(s, page, compound_order(page));
+
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1618,7 +1629,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, objsize);
 
+	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
 	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
 	return object;
 }
 
@@ -1751,6 +1764,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
+	kmemcheck_slab_free(s, object, c->objsize);
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(object, c->objsize);
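With the per-object hooks in place, kmemcheck can follow each object's lifetime through SLUB's fast paths: kmemcheck_slab_alloc() marks the object's shadow bytes as allocated but uninitialized (unless __GFP_ZERO already zeroed them), and kmemcheck_slab_free() marks them freed. A hedged illustration of the kind of bug this catches, assuming a kmemcheck-enabled kernel — the struct and function names here are hypothetical, not from the patch:

	#include <linux/slab.h>

	struct demo { int a; int b; };

	static int demo_read_uninit(void)
	{
		struct demo *d = kmalloc(sizeof(*d), GFP_KERNEL);
		int val;

		if (!d)
			return -ENOMEM;
		d->a = 1;	/* shadow bytes for 'a' become initialized */
		val = d->b;	/* 'b' was never written: kmemcheck warns here */
		kfree(d);	/* kmemcheck_slab_free() marks the object freed */
		return val;
	}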
@@ -2625,7 +2639,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN,
-			SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+			SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+			NULL)) {
 		kfree(s);
 		kfree(text);
 		goto unlock_out;
@@ -4396,6 +4411,8 @@ static char *create_unique_id(struct kmem_cache *s)
 		*p++ = 'a';
 	if (s->flags & SLAB_DEBUG_FREE)
 		*p++ = 'F';
+	if (!(s->flags & SLAB_NOTRACK))
+		*p++ = 't';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);
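Since SLAB_NOTRACK now takes part in both SLUB_MERGE_SAME and the unique id, tracked and untracked caches are kept from merging with each other: an untracked cache skips the shadow allocation in allocate_slab() and omits the 't' suffix in create_unique_id(). A usage sketch, not part of the patch — the cache and struct names are hypothetical:

	#include <linux/init.h>
	#include <linux/slab.h>

	struct untracked_obj { int x; };

	static struct kmem_cache *untracked_cache;

	static int __init untracked_init(void)
	{
		/* SLAB_NOTRACK: kmemcheck never attaches a shadow here */
		untracked_cache = kmem_cache_create("untracked_cache",
						    sizeof(struct untracked_obj),
						    0, SLAB_NOTRACK, NULL);
		return untracked_cache ? 0 : -ENOMEM;
	}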