slab: add hooks for kmemcheck
We now have SLAB support for kmemcheck! This means that it doesn't matter whether one chooses SLAB or SLUB, or indeed whether Linus chooses to chuck SLAB or SLUB.. ;-)

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
This commit is contained in:
parent 5a896d9e7c
commit c175eea466
1 changed file with 18 additions and 2 deletions
mm/slab.c (20 lines changed: 18 additions, 2 deletions)
@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>

 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif

 /*
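SLAB sanity-checks cache-creation flags against CREATE_MASK, so SLAB_NOTRACK has to be whitelisted in both the debug and non-debug variants above before any cache can opt out of kmemcheck tracking. A minimal sketch of such an opt-out follows; the cache name, object size, and init function are hypothetical, not part of this commit:

#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical cache whose objects should be ignored by kmemcheck. */
static struct kmem_cache *noisy_cachep;

static int __init noisy_cache_init(void)
{
	/*
	 * SLAB_NOTRACK opts these pages out of shadow tracking; without
	 * the CREATE_MASK change above, SLAB would reject the flag.
	 */
	noisy_cachep = kmem_cache_create("noisy_cache", 256, 0,
					 SLAB_NOTRACK, NULL);
	return noisy_cachep ? 0 : -ENOMEM;
}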
@@ -1624,6 +1625,10 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
+		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+
 	return page_address(page);
 }

@@ -1636,6 +1641,9 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;

+	if (kmemcheck_page_is_tracked(page))
+		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
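These two page-level hooks bracket a slab's lifetime: kmemcheck_alloc_shadow() sets up shadow state when the backing pages are obtained, and kmemcheck_free_shadow() tears it down before the pages return to the page allocator, with kmemcheck_page_is_tracked() skipping pages from SLAB_NOTRACK caches. A rough userspace analogy of that pairing, purely illustrative and not kernel code, pairs every buffer with a same-sized shadow recording per-byte state:

#include <stdlib.h>
#include <string.h>

struct shadowed_buf {
	unsigned char *data;
	unsigned char *shadow;	/* 0 = uninitialized, 1 = initialized */
	size_t size;
};

static int shadowed_alloc(struct shadowed_buf *b, size_t size)
{
	b->data = malloc(size);
	b->shadow = malloc(size);
	if (!b->data || !b->shadow) {
		free(b->data);
		free(b->shadow);
		return -1;
	}
	b->size = size;
	memset(b->shadow, 0, size);	/* fresh memory starts uninitialized */
	return 0;
}

static void shadowed_free(struct shadowed_buf *b)
{
	/* Mirrors kmem_freepages(): the shadow dies with the data pages. */
	free(b->shadow);
	free(b->data);
}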
@@ -3309,6 +3317,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);

+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));

@@ -3367,6 +3378,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);

+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));

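Both allocation paths call kmemcheck_slab_alloc() on every successful allocation, and notably before the __GFP_ZERO memset(), so the zeroing store is observed by kmemcheck like any other initialization. A hypothetical example of the bug class this lets kmemcheck flag (struct foo and demo() are illustrative, not from this commit):

#include <linux/slab.h>

struct foo {
	int a;
	int b;
};

static int demo(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;
	f->a = 1;
	/* f->b was never written; kmemcheck traps this load and warns. */
	ret = f->b;
	kfree(f);
	return ret;
}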
@@ -3483,6 +3497,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));

+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which
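On the free side, kmemcheck_slab_free() runs after the debug checks, presumably so kmemcheck can update the object's shadow state as it returns to the cache. A hedged illustration of why that matters, reusing struct foo from the previous sketch (foo_cachep and reuse_demo() are hypothetical): a recycled object must be re-initialized before use, and per-object tracking at free and alloc time is what lets kmemcheck notice when it is not.

static int reuse_demo(struct kmem_cache *foo_cachep)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
	int stale;

	if (!f)
		return -ENOMEM;
	f->a = 1;
	kmem_cache_free(foo_cachep, f);	/* __cache_free() hook fires here */

	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);	/* may recycle the object */
	if (!f)
		return -ENOMEM;
	/* Read before re-initializing: kmemcheck can flag the stale load. */
	stale = f->a;
	kmem_cache_free(foo_cachep, f);
	return stale;
}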