Mirror of https://github.com/adulau/aha.git (synced 2024-12-27 03:06:10 +00:00)
atomic: only take lock when the counter drops to zero on UP as well
_atomic_dec_and_lock() should not unconditionally take the lock before calling atomic_dec_and_test() in the UP case. For consistency reasons it should behave exactly like in the SMP case.

Besides that this works around the problem that with CONFIG_DEBUG_SPINLOCK this spins in __spin_lock_debug() if the lock is already taken even if the counter doesn't drop to 0.

Signed-off-by: Jan Blunck <jblunck@suse.de>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Nick Piggin <npiggin@suse.de>
Cc: Valerie Aurora <vaurora@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
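For context, the usual caller pattern for this primitive looks like the sketch below: a reference is dropped locklessly on the fast path, and the spinlock is taken only when the count reaches zero. This is a minimal, hypothetical example; the struct, field, and lock names (obj, refcount, node, obj_list_lock, obj_put) are invented for illustration and are not part of this commit.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object with a reference count and a place on a global list. */
struct obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(obj_list_lock);
static LIST_HEAD(obj_list);

/* Drop a reference; unlink and free the object when the count hits zero. */
static void obj_put(struct obj *o)
{
	/*
	 * atomic_dec_and_lock() decrements without the lock while the count
	 * stays above zero; only the final decrement takes obj_list_lock,
	 * which is returned held so the removal below is race-free.
	 */
	if (atomic_dec_and_lock(&o->refcount, &obj_list_lock)) {
		list_del(&o->node);
		spin_unlock(&obj_list_lock);
		kfree(o);
	}
}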
This commit is contained in:
parent a7d932af06
commit 417dcdf99e

1 changed file with 1 addition and 2 deletions
@@ -19,11 +19,10 @@
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
-#ifdef CONFIG_SMP
 	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
 	if (atomic_add_unless(atomic, -1, 1))
 		return 0;
-#endif
+
 	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
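After this change, the whole function should read roughly as follows. The closing lines fall outside the hunk shown above and are reconstructed for context only; treat everything past the atomic_dec_and_test() check as an assumption rather than part of this diff.

int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}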