kmemleak: Simplify the kmemleak_scan_area() function prototype
This function was taking unnecessary arguments that can be determined by kmemleak itself. The patch also modifies the calling sites.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
parent e7cb55b946
commit c017b4be3e

4 changed files with 27 additions and 39 deletions
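To make the prototype change concrete, here is a minimal before/after sketch of a call site, modelled on the mm/slab.c hunk below (the surrounding kernel context is assumed; this is illustrative rather than standalone-buildable code):

	/* Before: pass the enclosing block plus an offset/length pair. */
	kmemleak_scan_area(slabp, offsetof(struct slab, list),
			   sizeof(struct list_head), local_flags);

	/* After: pass the address of the area to scan directly; kmemleak
	 * resolves the enclosing object from the pointer, making the
	 * offset argument redundant. */
	kmemleak_scan_area(&slabp->list, sizeof(struct list_head), local_flags);

Internally this relies on find_and_get_object() accepting a pointer that aliases into the middle of an object, which is why the diff changes its second argument from 0 to 1 in add_scan_area().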
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
 			     size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
-			       size_t length, gfp_t gfp) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
 static inline void kmemleak_ignore(const void *ptr)
 {
 }
-static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
-				      size_t length, gfp_t gfp)
+static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 }
 static inline void kmemleak_erase(void **ptr)
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2043,9 +2043,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 	unsigned int i;
 
 	/* only scan the sections containing data */
-	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-			   (unsigned long)mod->module_core,
-			   sizeof(struct module), GFP_KERNEL);
+	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -2054,8 +2052,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
 			continue;
 
-		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-				   (unsigned long)mod->module_core,
-				   sechdrs[i].sh_size, GFP_KERNEL);
+		kmemleak_scan_area((void *)sechdrs[i].sh_addr,
+				   sechdrs[i].sh_size, GFP_KERNEL);
 	}
 }
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -119,8 +119,8 @@
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
 	struct hlist_node node;
-	unsigned long offset;
-	size_t length;
+	unsigned long start;
+	size_t size;
 };
 
 #define KMEMLEAK_GREY	0
@@ -241,8 +241,6 @@ struct early_log {
 	const void *ptr;		/* allocated/freed memory block */
 	size_t size;			/* memory block size */
 	int min_count;			/* minimum reference count */
-	unsigned long offset;		/* scan area offset */
-	size_t length;			/* scan area length */
 	unsigned long trace[MAX_TRACE];	/* stack trace */
 	unsigned int trace_len;		/* stack trace length */
 };
@@ -720,14 +718,13 @@ static void make_black_object(unsigned long ptr)
  * Add a scanning area to the object. If at least one such area is added,
  * kmemleak will only scan these ranges rather than the whole memory block.
  */
-static void add_scan_area(unsigned long ptr, unsigned long offset,
-			  size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
 	struct kmemleak_scan_area *area;
 
-	object = find_and_get_object(ptr, 0);
+	object = find_and_get_object(ptr, 1);
 	if (!object) {
 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 			      ptr);
@@ -741,7 +738,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	}
 
 	spin_lock_irqsave(&object->lock, flags);
-	if (offset + length > object->size) {
+	if (ptr + size > object->pointer + object->size) {
 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
 		kmem_cache_free(scan_area_cache, area);
@@ -749,8 +746,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 	}
 
 	INIT_HLIST_NODE(&area->node);
-	area->offset = offset;
-	area->length = length;
+	area->start = ptr;
+	area->size = size;
 
 	hlist_add_head(&area->node, &object->area_list);
 out_unlock:
@@ -786,7 +783,7 @@ static void object_no_scan(unsigned long ptr)
  * processed later once kmemleak is fully initialized.
  */
 static void __init log_early(int op_type, const void *ptr, size_t size,
-			     int min_count, unsigned long offset, size_t length)
+			     int min_count)
 {
 	unsigned long flags;
 	struct early_log *log;
@@ -808,8 +805,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	log->ptr = ptr;
 	log->size = size;
 	log->min_count = min_count;
-	log->offset = offset;
-	log->length = length;
 	if (op_type == KMEMLEAK_ALLOC)
 		log->trace_len = __save_stack_trace(log->trace);
 	crt_early_log++;
@@ -858,7 +853,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		create_object((unsigned long)ptr, size, min_count, gfp);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -873,7 +868,7 @@ void __ref kmemleak_free(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		delete_object_full((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -888,7 +883,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		delete_object_part((unsigned long)ptr, size);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -903,7 +898,7 @@ void __ref kmemleak_not_leak(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		make_gray_object((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -919,22 +914,21 @@ void __ref kmemleak_ignore(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		make_black_object((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
 /*
  * Limit the range to be scanned in an allocated memory block.
  */
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
-			      size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-		add_scan_area((unsigned long)ptr, offset, length, gfp);
+		add_scan_area((unsigned long)ptr, size, gfp);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
@@ -948,7 +942,7 @@ void __ref kmemleak_no_scan(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		object_no_scan((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
@@ -1075,9 +1069,9 @@ static void scan_object(struct kmemleak_object *object)
 		}
 	} else
 		hlist_for_each_entry(area, elem, &object->area_list, node)
-			scan_block((void *)(object->pointer + area->offset),
-				   (void *)(object->pointer + area->offset
-					    + area->length), object, 0);
+			scan_block((void *)area->start,
+				   (void *)(area->start + area->size),
+				   object, 0);
 out:
 	spin_unlock_irqrestore(&object->lock, flags);
 }
@@ -1642,8 +1636,7 @@ void __init kmemleak_init(void)
 			kmemleak_ignore(log->ptr);
 			break;
 		case KMEMLEAK_SCAN_AREA:
-			kmemleak_scan_area(log->ptr, log->offset, log->length,
-					   GFP_KERNEL);
+			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
 			break;
 		case KMEMLEAK_NO_SCAN:
 			kmemleak_no_scan(log->ptr);
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2584,8 +2584,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		 * kmemleak does not treat the ->s_mem pointer as a reference
 		 * to the object. Otherwise we will not report the leak.
 		 */
-		kmemleak_scan_area(slabp, offsetof(struct slab, list),
-				   sizeof(struct list_head), local_flags);
+		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+				   local_flags);
 		if (!slabp)
 			return NULL;
 	} else {