debugobjects: replace static objects when slab cache becomes available
Impact: refactor/consolidate object management, prepare for delayed free

debugobjects allocates static reference objects to track objects which
are initialized or activated before the slab cache becomes available.
These static reference objects have to be handled separately in
free_object(). This special handling stands in the way of implementing
a delayed free functionality, which is required to avoid callbacks into
the mm code from debug_check_no_obj_freed().

Replace the static object references with dynamic ones after the slab
cache has been initialized. The static objects are now marked __initdata.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <200903162049.58058.nickpiggin@yahoo.com.au>
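In essence the patch does the following: while only the boot CPU is running, allocate one dynamic object for every entry still held in the static pool, copy the tracked state over, and relink the hash buckets so that no reference into obj_static_pool survives. A rough userspace sketch of that idea follows; plain malloc and a hand-rolled singly linked list stand in for kmem_cache and the hlist API, and every name below is invented purely for illustration:

	/* sketch.c - illustrative only, not the kernel code */
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int payload;                 /* stands in for struct debug_obj data */
		struct obj *next;
	};

	#define POOL_SIZE 4

	static struct obj static_pool[POOL_SIZE];  /* early-boot objects          */
	static struct obj *tracked;                /* list of "active" objects    */

	/* Early init: only the static pool is usable. */
	static void early_init(void)
	{
		for (int i = 0; i < POOL_SIZE; i++) {
			static_pool[i].payload = i;
			static_pool[i].next = tracked;
			tracked = &static_pool[i];
		}
	}

	/* Once the allocator works: replace every static object with a heap copy. */
	static int replace_static_objects(void)
	{
		struct obj *new_head = NULL;

		for (struct obj *o = tracked; o; o = o->next) {
			struct obj *new = malloc(sizeof(*new));
			if (!new)
				return -1;       /* real code would unwind here */
			*new = *o;               /* copy object data            */
			new->next = new_head;
			new_head = new;
		}
		tracked = new_head;              /* static pool no longer referenced */
		return 0;
	}

	int main(void)
	{
		early_init();
		if (replace_static_objects())
			return 1;
		for (struct obj *o = tracked; o; o = o->next)
			printf("payload %d at %p (heap)\n", o->payload, (void *)o);
		return 0;
	}

The actual patch has to do more bookkeeping than this sketch: the objects live in hash buckets, the free pool is an hlist, and the swap must happen with interrupts disabled, as the diff below shows.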
This commit is contained in:
parent 3e8ebb5c43
commit 1be1cb7b47
1 changed file with 63 additions and 3 deletions
@@ -30,7 +30,7 @@ struct debug_bucket {
 
 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
 
-static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];
+static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 
 static DEFINE_SPINLOCK(pool_lock);
 
@@ -883,6 +883,63 @@ void __init debug_objects_early_init(void)
 		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
 }
 
+/*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int debug_objects_replace_static_objects(void)
+{
+	struct debug_bucket *db = obj_hash;
+	struct hlist_node *node, *tmp;
+	struct debug_obj *obj, *new;
+	HLIST_HEAD(objects);
+	int i, cnt = 0;
+
+	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+		if (!obj)
+			goto free;
+		hlist_add_head(&obj->node, &objects);
+	}
+
+	/*
+	 * When debug_objects_mem_init() is called we know that only
+	 * one CPU is up, so disabling interrupts is enough
+	 * protection. This avoids the lockdep hell of lock ordering.
+	 */
+	local_irq_disable();
+
+	/* Remove the statically allocated objects from the pool */
+	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
+		hlist_del(&obj->node);
+	/* Move the allocated objects to the pool */
+	hlist_move_list(&objects, &obj_pool);
+
+	/* Replace the active object references */
+	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+		hlist_move_list(&db->list, &objects);
+
+		hlist_for_each_entry(obj, node, &objects, node) {
+			new = hlist_entry(obj_pool.first, typeof(*obj), node);
+			hlist_del(&new->node);
+			/* copy object data */
+			*new = *obj;
+			hlist_add_head(&new->node, &db->list);
+			cnt++;
+		}
+	}
+
+	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+	       obj_pool_used);
+	local_irq_enable();
+	return 0;
+free:
+	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
+	return -ENOMEM;
+}
+
 /*
  * Called after the kmem_caches are functional to setup a dedicated
  * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
@@ -898,8 +955,11 @@ void __init debug_objects_mem_init(void)
 				      sizeof (struct debug_obj), 0,
 				      SLAB_DEBUG_OBJECTS, NULL);
 
-	if (!obj_cache)
+	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
-	else
+		if (obj_cache)
+			kmem_cache_destroy(obj_cache);
+		printk(KERN_WARNING "ODEBUG: out of memory.\n");
+	} else
 		debug_objects_selftest();
 }
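Two hlist idioms carry most of the weight in the new function: hlist_move_list() splices a whole list onto another head in O(1), and hlist_for_each_entry_safe() caches the next node before the loop body runs so the current node can be unlinked or freed mid-iteration. A minimal userspace analogue of both, assuming nothing beyond standard C and using invented helper names:

	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int val;
		struct node *next;
	};

	/* "move_list": transfer all nodes from *src to *dst in one step,
	 * leaving the source empty, like hlist_move_list(). */
	static void move_list(struct node **src, struct node **dst)
	{
		*dst = *src;
		*src = NULL;
	}

	/* "for_each_safe": remember the next pointer before the body runs,
	 * so the current node may be freed, like hlist_for_each_entry_safe(). */
	#define for_each_safe(pos, tmp, head) \
		for ((pos) = (head); (pos) && ((tmp) = (pos)->next, 1); (pos) = (tmp))

	int main(void)
	{
		struct node *list = NULL, *spare = NULL, *pos, *tmp;

		for (int i = 0; i < 3; i++) {
			struct node *n = malloc(sizeof(*n));
			if (!n)
				return 1;
			n->val = i;
			n->next = list;
			list = n;
		}

		move_list(&list, &spare);        /* list is now empty, spare holds all */

		for_each_safe(pos, tmp, spare) { /* freeing pos is safe here */
			printf("freeing %d\n", pos->val);
			free(pos);
		}
		return 0;
	}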