[PATCH] lockdep: core, reduce per-lock class-cache size
lockdep_map is embedded into every lock, which blows up data structure
sizes all around the kernel. Reduce the class-cache to be for the default
class only - that is used in 99.9% of the cases and even if we don't have
a class cached, the lookup in the class-hash is lockless.

This change reduces the per-lock dep_map overhead by 56 bytes on 64-bit
platforms and by 28 bytes on 32-bit platforms.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 55794a412f
commit d6d897cec2
2 changed files with 55 additions and 34 deletions
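The savings quoted in the changelog follow directly from replacing the
per-subclass pointer array with a single cached pointer. Below is a minimal
standalone C sketch of the arithmetic, assuming MAX_LOCKDEP_SUBCLASSES is 8
(its value in this era of the kernel); it is an illustration, not kernel code.

#include <stdio.h>

/* Assumed value of MAX_LOCKDEP_SUBCLASSES in this era of the kernel. */
#define MAX_LOCKDEP_SUBCLASSES 8UL

int main(void)
{
	/* Before: one lock_class pointer cached per possible subclass. */
	unsigned long before = MAX_LOCKDEP_SUBCLASSES * sizeof(void *);
	/* After: one pointer, caching only the default (subclass 0) class. */
	unsigned long after = sizeof(void *);

	/* 64-bit: 8*8 - 8 = 56 bytes; 32-bit: 8*4 - 4 = 28 bytes. */
	printf("per-lock dep_map saving: %lu bytes\n", before - after);
	return 0;
}

The trade-off is cheap on the fast path: subclass 0 covers the overwhelming
majority of acquisitions, and on a cache miss __lock_acquire() falls back to
register_lock_class(), whose hash lookup is lockless.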
include/linux/lockdep.h
@@ -120,7 +120,7 @@ struct lock_class {
  */
 struct lockdep_map {
 	struct lock_class_key		*key;
-	struct lock_class		*class[MAX_LOCKDEP_SUBCLASSES];
+	struct lock_class		*class_cache;
	const char			*name;
 };
 
kernel/lockdep.c
@@ -1104,7 +1104,7 @@ extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
@@ -1148,7 +1148,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 */
 	list_for_each_entry(class, hash_head, hash_entry)
 		if (class->key == key)
-			goto out_set;
+			return class;
+
+	return NULL;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+{
+	struct lockdep_subclass_key *key;
+	struct list_head *hash_head;
+	struct lock_class *class;
+
+	class = look_up_lock_class(lock, subclass);
+	if (likely(class))
+		return class;
 
 	/*
 	 * Debug-check: all keys must be persistent!
@@ -1163,6 +1182,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		return NULL;
 	}
 
+	key = lock->key->subkeys + subclass;
+	hash_head = classhashentry(key);
+
 	__raw_spin_lock(&hash_lock);
 	/*
 	 * We have to do the hash-walk again, to avoid races
@@ -1209,8 +1231,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
 
-out_set:
-	lock->class[subclass] = class;
+	if (!subclass)
+		lock->class_cache = class;
 
 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
 
@@ -1914,7 +1936,7 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	}
 	lock->name = name;
 	lock->key = key;
-	memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES);
+	lock->class_cache = NULL;
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1928,8 +1950,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  unsigned long ip)
 {
 	struct task_struct *curr = current;
+	struct lock_class *class = NULL;
 	struct held_lock *hlock;
-	struct lock_class *class;
 	unsigned int depth, id;
 	int chain_head = 0;
 	u64 chain_key;
@@ -1947,8 +1969,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		return 0;
 	}
 
-	class = lock->class[subclass];
-	/* not cached yet? */
+	if (!subclass)
+		class = lock->class_cache;
+	/*
+	 * Not cached yet or subclass?
+	 */
 	if (unlikely(!class)) {
 		class = register_lock_class(lock, subclass);
 		if (!class)
@@ -2449,48 +2474,44 @@ void lockdep_free_key_range(void *start, unsigned long size)
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-	struct lock_class *class, *next, *entry;
+	struct lock_class *class, *next;
 	struct list_head *head;
 	unsigned long flags;
 	int i, j;
 
 	raw_local_irq_save(flags);
-	__raw_spin_lock(&hash_lock);
 
 	/*
-	 * Remove all classes this lock has:
+	 * Remove all classes this lock might have:
 	 */
+	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
+		/*
+		 * If the class exists we look it up and zap it:
+		 */
+		class = look_up_lock_class(lock, j);
+		if (class)
+			zap_class(class);
+	}
+	/*
+	 * Debug check: in the end all mapped classes should
+	 * be gone.
+	 */
+	__raw_spin_lock(&hash_lock);
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_safe(class, next, head, hash_entry) {
-			for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-				entry = lock->class[j];
-				if (class == entry) {
-					zap_class(class);
-					lock->class[j] = NULL;
-					break;
-				}
+			if (unlikely(class == lock->class_cache)) {
+				__raw_spin_unlock(&hash_lock);
+				DEBUG_LOCKS_WARN_ON(1);
+				goto out_restore;
 			}
 		}
 	}
-
-	/*
-	 * Debug check: in the end all mapped classes should
-	 * be gone.
-	 */
-	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-		entry = lock->class[j];
-		if (!entry)
-			continue;
-		__raw_spin_unlock(&hash_lock);
-		DEBUG_LOCKS_WARN_ON(1);
-		raw_local_irq_restore(flags);
-		return;
-	}
-
 	__raw_spin_unlock(&hash_lock);
+
+out_restore:
 	raw_local_irq_restore(flags);
 }
 