mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 11:46:19 +00:00
Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6
* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: Fix some typos in comments
  kmemleak: Rename kmemleak_panic to kmemleak_stop
  kmemleak: Only use GFP_KERNEL|GFP_ATOMIC for the internal allocations
This commit is contained in:
commit
3fe0344faf
1 changed file with 15 additions and 12 deletions
|
@ -109,6 +109,9 @@
|
||||||
|
|
||||||
#define BYTES_PER_POINTER sizeof(void *)
|
#define BYTES_PER_POINTER sizeof(void *)
|
||||||
|
|
||||||
|
/* GFP bitmask for kmemleak internal allocations */
|
||||||
|
#define GFP_KMEMLEAK_MASK (GFP_KERNEL | GFP_ATOMIC)
|
||||||
|
|
||||||
/* scanning area inside a memory block */
|
/* scanning area inside a memory block */
|
||||||
struct kmemleak_scan_area {
|
struct kmemleak_scan_area {
|
||||||
struct hlist_node node;
|
struct hlist_node node;
|
||||||
|
@ -199,9 +202,9 @@ static DEFINE_MUTEX(kmemleak_mutex);
|
||||||
static int reported_leaks;
|
static int reported_leaks;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Early object allocation/freeing logging. Kkmemleak is initialized after the
|
* Early object allocation/freeing logging. Kmemleak is initialized after the
|
||||||
* kernel allocator. However, both the kernel allocator and kmemleak may
|
* kernel allocator. However, both the kernel allocator and kmemleak may
|
||||||
* allocate memory blocks which need to be tracked. Kkmemleak defines an
|
* allocate memory blocks which need to be tracked. Kmemleak defines an
|
||||||
* arbitrary buffer to hold the allocation/freeing information before it is
|
* arbitrary buffer to hold the allocation/freeing information before it is
|
||||||
* fully initialized.
|
* fully initialized.
|
||||||
*/
|
*/
|
||||||
|
@ -245,10 +248,10 @@ static void kmemleak_disable(void);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Macro invoked when a serious kmemleak condition occured and cannot be
|
* Macro invoked when a serious kmemleak condition occured and cannot be
|
||||||
* recovered from. Kkmemleak will be disabled and further allocation/freeing
|
* recovered from. Kmemleak will be disabled and further allocation/freeing
|
||||||
* tracing no longer available.
|
* tracing no longer available.
|
||||||
*/
|
*/
|
||||||
#define kmemleak_panic(x...) do { \
|
#define kmemleak_stop(x...) do { \
|
||||||
kmemleak_warn(x); \
|
kmemleak_warn(x); \
|
||||||
kmemleak_disable(); \
|
kmemleak_disable(); \
|
||||||
} while (0)
|
} while (0)
|
||||||
|
@ -462,10 +465,10 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
|
||||||
struct prio_tree_node *node;
|
struct prio_tree_node *node;
|
||||||
struct stack_trace trace;
|
struct stack_trace trace;
|
||||||
|
|
||||||
object = kmem_cache_alloc(object_cache, gfp & ~GFP_SLAB_BUG_MASK);
|
object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
|
||||||
if (!object) {
|
if (!object) {
|
||||||
kmemleak_panic("kmemleak: Cannot allocate a kmemleak_object "
|
kmemleak_stop("kmemleak: Cannot allocate a kmemleak_object "
|
||||||
"structure\n");
|
"structure\n");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -524,8 +527,8 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
|
||||||
if (node != &object->tree_node) {
|
if (node != &object->tree_node) {
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
kmemleak_panic("kmemleak: Cannot insert 0x%lx into the object "
|
kmemleak_stop("kmemleak: Cannot insert 0x%lx into the object "
|
||||||
"search tree (already existing)\n", ptr);
|
"search tree (already existing)\n", ptr);
|
||||||
object = lookup_object(ptr, 1);
|
object = lookup_object(ptr, 1);
|
||||||
spin_lock_irqsave(&object->lock, flags);
|
spin_lock_irqsave(&object->lock, flags);
|
||||||
dump_object_info(object);
|
dump_object_info(object);
|
||||||
|
@ -636,7 +639,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
area = kmem_cache_alloc(scan_area_cache, gfp & ~GFP_SLAB_BUG_MASK);
|
area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
|
||||||
if (!area) {
|
if (!area) {
|
||||||
kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
|
kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -696,7 +699,7 @@ static void log_early(int op_type, const void *ptr, size_t size,
|
||||||
struct early_log *log;
|
struct early_log *log;
|
||||||
|
|
||||||
if (crt_early_log >= ARRAY_SIZE(early_log)) {
|
if (crt_early_log >= ARRAY_SIZE(early_log)) {
|
||||||
kmemleak_panic("kmemleak: Early log buffer exceeded\n");
|
kmemleak_stop("kmemleak: Early log buffer exceeded\n");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1404,7 +1407,7 @@ static int kmemleak_boot_config(char *str)
|
||||||
early_param("kmemleak", kmemleak_boot_config);
|
early_param("kmemleak", kmemleak_boot_config);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Kkmemleak initialization.
|
* Kmemleak initialization.
|
||||||
*/
|
*/
|
||||||
void __init kmemleak_init(void)
|
void __init kmemleak_init(void)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in a new issue