Mirror of https://github.com/adulau/aha.git (synced 2024-12-28 03:36:19 +00:00)
Merge branch 'locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  spinlock: Add missing __raw_spin_lock_flags() stub for UP
  mutex: add atomic_dec_and_mutex_lock(), fix
  locking, rtmutex.c: Documentation cleanup
  mutex: add atomic_dec_and_mutex_lock()
commit e7241d7714

4 changed files with 30 additions and 5 deletions
@@ -150,5 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #endif
@@ -60,6 +60,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 #define __raw_spin_is_locked(lock) ((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
 # define __raw_spin_lock(lock) do { (void)(lock); } while (0)
+# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
 # define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
 # define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
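For context on why the added UP stub is written this way: on a uniprocessor build without spinlock debugging these operations must compile to nothing, yet still evaluate their arguments (so there are no unused-variable warnings and no lost side effects) and still behave like a single statement. Below is a minimal user-space sketch of the same pattern; the names are hypothetical, and the trylock stub uses the GCC/Clang statement-expression extension that the kernel macro above also relies on.

#include <stdio.h>

/* Hypothetical no-op lock stubs mirroring the UP pattern in the hunk above. */
#define my_lock(lock, flags)    do { (void)(lock); (void)(flags); } while (0)
#define my_unlock(lock)         do { (void)(lock); } while (0)
#define my_trylock(lock)        ({ (void)(lock); 1; })  /* always succeeds */

int main(void)
{
        int lock = 0, flags = 0;

        if (my_trylock(&lock))
                printf("trylock succeeded (it always does in the UP stub)\n");

        my_lock(&lock, flags);
        /* critical section would go here */
        my_unlock(&lock);
        return 0;
}

The do { ... } while (0) wrapper is what keeps the stub safe inside an un-braced if/else; the missing __raw_spin_lock_flags() added by this merge follows exactly that shape.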
@@ -473,5 +473,28 @@ int __sched mutex_trylock(struct mutex *lock)
         return ret;
 }
 
 EXPORT_SYMBOL(mutex_trylock);
 
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+        /* dec if we can't possibly hit 0 */
+        if (atomic_add_unless(cnt, -1, 1))
+                return 0;
+        /* we might hit 0, so take the lock */
+        mutex_lock(lock);
+        if (!atomic_dec_and_test(cnt)) {
+                /* when we actually did the dec, we didn't hit 0 */
+                mutex_unlock(lock);
+                return 0;
+        }
+        /* we hit 0, and we hold the lock */
+        return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
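The new helper targets the common "free on the last put, but the teardown path needs a mutex" case: while the reference count cannot reach zero, the decrement stays a lock-free atomic operation, and only the final put returns 1, with the mutex already held. A hedged usage sketch follows; the cache_entry structure, list, and function names are hypothetical and not part of this commit.

#include <linux/list.h>
#include <linux/mutex.h>        /* declares atomic_dec_and_mutex_lock(), per the header hunk above */
#include <linux/slab.h>

/* Hypothetical refcounted object kept on a subsystem-wide list. */
struct cache_entry {
        atomic_t refcount;
        struct list_head node;
};

static LIST_HEAD(cache_list);
static DEFINE_MUTEX(cache_lock);        /* protects cache_list */

static void cache_entry_put(struct cache_entry *e)
{
        /*
         * Fast path: a plain atomic decrement; cache_lock is never taken.
         * Only the put that drops the count to zero returns 1, and it
         * returns with cache_lock held, so exactly one caller unlinks
         * and frees the entry.
         */
        if (!atomic_dec_and_mutex_lock(&e->refcount, &cache_lock))
                return;

        list_del(&e->node);
        mutex_unlock(&cache_lock);
        kfree(e);
}

The pattern assumes new references are only taken under the same mutex (for example during a list lookup), so once the final put holds cache_lock with the count at zero, nobody can resurrect the entry before it is freed.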
@@ -891,9 +891,9 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
 /**
- * rt_mutex_lock_interruptible_ktime - lock a rt_mutex interruptible
- * the timeout structure is provided
- * by the caller
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
+ * the timeout structure is provided
+ * by the caller
  *
  * @lock: the rt_mutex to be locked
  * @timeout: timeout structure or NULL (no timeout)
@@ -940,7 +940,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
-/***
+/**
  * rt_mutex_destroy - mark a mutex unusable
  * @lock: the mutex to be destroyed
  *
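The one-character change above is more than cosmetics: as far as the kernel-doc tooling (scripts/kernel-doc) is concerned, only comment blocks that open with exactly the two-asterisk marker are treated as documentation, so the stray third asterisk was hiding rt_mutex_destroy() from the generated docs. A sketch of the expected shape, using a hypothetical helper rather than the real rt_mutex_destroy() comment body:

struct widget;  /* hypothetical type, for illustration only */

/**
 * widget_retire - mark a widget unusable
 * @w: the widget to be retired
 *
 * A kernel-doc block opens with the slash-star-star marker on its own
 * line, names the function with a short summary, and documents each
 * parameter with an "@name:" line.
 */
int widget_retire(struct widget *w);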