lockdep: map_acquire
Most of the free-standing lock_acquire() usages look remarkably similar; sweep them into a new helper.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f82b217e35
commit 4f3e7524b2

4 changed files with 28 additions and 16 deletions
fs/jbd/transaction.c

@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
 		goto out;
 	}
 
-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	map_acquire(&handle->h_lockdep_map);
 
 out:
 	return handle;

@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	map_release(&handle->h_lockdep_map);
 
 	jbd_free_handle(handle);
 	return err;
fs/jbd2/transaction.c

@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 		goto out;
 	}
 
-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	map_acquire(&handle->h_lockdep_map);
 out:
 	return handle;
 }

@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	map_release(&handle->h_lockdep_map);
 
 	jbd2_free_handle(handle);
 	return err;
include/linux/lockdep.h

@@ -459,4 +459,16 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define rwsem_release(l, n, i)		do { } while (0)
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define map_acquire(l)	lock_acquire(l, 0, 0, 0, 2, _THIS_IP_)
+# else
+#  define map_acquire(l)	lock_acquire(l, 0, 0, 0, 1, _THIS_IP_)
+# endif
+# define map_release(l)		lock_release(l, 1, _THIS_IP_)
+#else
+# define map_acquire(l)		do { } while (0)
+# define map_release(l)		do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
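For context, the new helpers annotate a bare struct lockdep_map, a "pseudo-lock" with no real lock behind it. A minimal sketch of how a subsystem might use them, in the style of the workqueue code below; the my_* names are hypothetical, the rest is the existing lockdep API:

#include <linux/lockdep.h>

/* A lockdep-only "lock": a map plus a class key, no actual locking. */
static struct lock_class_key my_key;
static struct lockdep_map my_dep_map =
	STATIC_LOCKDEP_MAP_INIT("my_subsystem", &my_key);

static void my_run_callback(void (*fn)(void *data), void *data)
{
	/*
	 * Tell lockdep we "hold" my_dep_map across the callback; if
	 * fn() re-enters this path, or takes a lock that elsewhere
	 * nests the other way around my_dep_map, lockdep reports a
	 * potential deadlock.
	 */
	map_acquire(&my_dep_map);
	fn(data);
	map_release(&my_dep_map);
}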
kernel/workqueue.c

@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
-		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		map_acquire(&cwq->wq->lockdep_map);
+		map_acquire(&lockdep_map);
 		f(work);
-		lock_release(&lockdep_map, 1, _THIS_IP_);
-		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+		map_release(&lockdep_map);
+		map_release(&cwq->wq->lockdep_map);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "

@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
-	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+	map_acquire(&wq->lockdep_map);
+	map_release(&wq->lockdep_map);
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }

@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
 	if (!cwq)
 		return 0;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	map_acquire(&cwq->wq->lockdep_map);
+	map_release(&cwq->wq->lockdep_map);
 
 	prev = NULL;
 	spin_lock_irq(&cwq->lock);

@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 
 	might_sleep();
 
-	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+	map_acquire(&work->lockdep_map);
+	map_release(&work->lockdep_map);
 
 	cwq = get_wq_data(work);
 	if (!cwq)

@@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	if (cwq->thread == NULL)
 		return;
 
-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	map_acquire(&cwq->wq->lockdep_map);
+	map_release(&cwq->wq->lockdep_map);
 
 	flush_cpu_workqueue(cwq);
 	/*
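For context, the acquire/release pairs in the flush paths above make flushing a workqueue look to lockdep like taking the queue's pseudo-lock. A minimal sketch of the bug class this catches; my_wq and my_work_fn are hypothetical:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical example queue */

static void my_work_fn(struct work_struct *work)
{
	/*
	 * Deadlock-prone: run_workqueue() already "holds" my_wq's
	 * lockdep_map while running this item, and flush_workqueue()
	 * acquires it again, so lockdep flags the recursion even on
	 * runs where the flush happens to complete.
	 */
	flush_workqueue(my_wq);
}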