locking: Convert raw_spinlock to arch_spinlock
The raw_spin* namespace was taken by lockdep for the architecture specific implementations. raw_spin_* would be the ideal name space for the spinlocks which are not converted to sleeping locks in preempt-rt.

Linus suggested converting the raw_ locks to arch_ locks and cleaning up the name space instead of using an artificial name like core_spin, atomic_spin or whatever.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
commit 445c89514b (parent 6b6b4792f8)
51 changed files with 164 additions and 164 deletions
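As a rough sketch of the rename this diff applies everywhere below (illustrative only, assembled from the pattern visible in the patch rather than quoted from any single file), an architecture's lock type and the helpers that take it change only their spelling; the layout and behaviour stay the same:

/* before: the arch-level type and accessor used the raw_ prefix */
typedef struct {
	volatile unsigned int lock;
} raw_spinlock_t;

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	mb();
	lock->lock = 0;
}

/* after: identical body, the type is now spelled arch_spinlock_t */
typedef struct {
	volatile unsigned int lock;
} arch_spinlock_t;

static inline void __raw_spin_unlock(arch_spinlock_t *lock)
{
	mb();
	lock->lock = 0;
}

Callers that merely declare a lock (for example a static lock initialized with __RAW_SPIN_LOCK_UNLOCKED) pick up the new type name the same way, which is why the change touches 51 files without altering behaviour.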
@@ -17,13 +17,13 @@
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)
 
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void __raw_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void __raw_spin_lock(arch_spinlock_t * lock)
 {
 	long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return !test_and_set_bit(0, &lock->lock);
 }
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -23,7 +23,7 @@
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	smp_mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	}
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	smp_mb();
 
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr);
 asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __raw_spin_is_locked_asm(&lock->lock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	__raw_spin_lock_asm(&lock->lock);
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return __raw_spin_trylock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
 		cpu_relax();
@@ -15,7 +15,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
 {
 	return *(volatile signed char *)(&(x)->slock) <= 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
 			  : "=m" (lock->slock) \
@@ -22,24 +22,24 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 			  : "memory");
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
 		cpu_relax();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	cris_spin_lock((void *)&lock->slock);
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
 
@@ -38,7 +38,7 @@
 #define TICKET_BITS 15
 #define TICKET_MASK ((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket, serve;
 
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	}
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->lock);
 
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
 
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket;
 
@@ -89,53 +89,53 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
 	}
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
 #define __raw_spin_is_contended __raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock_wait(lock);
 }
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -36,7 +36,7 @@
  * __raw_spin_trylock() tries to get the lock and returns a result.
  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
  */
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	int oldval;
 	unsigned long tmp1, tmp2;
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (oldval > 0);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp0, tmp1;
 
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	mb();
 	lock->slock = 1;
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
 
@@ -34,7 +34,7 @@
  * becomes equal to the the initial value of the tail.
  */
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
 	unsigned int counters = ACCESS_ONCE(lock->lock);
 
@@ -45,7 +45,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 #define __raw_spin_unlock_wait(x) \
 	while (__raw_spin_is_locked(x)) { cpu_relax(); }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
 	unsigned int counters = ACCESS_ONCE(lock->lock);
 
@@ -53,7 +53,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 }
 #define __raw_spin_is_contended __raw_spin_is_contended
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	int my_ticket;
 	int tmp;
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		smp_llsc_mb();
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	int tmp;
 
@@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	}
 }
 
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, tmp2, tmp3;
 
@@ -12,7 +12,7 @@ typedef struct {
 	 * bits 15..28: ticket
 	 */
 	unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -27,18 +27,18 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
 	__raw_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
 	__raw_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
 
@@ -5,7 +5,7 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int __raw_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
@@ -15,7 +15,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *x,
 					 unsigned long flags)
 {
 	volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
 	mb();
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void __raw_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
 	mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int __raw_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	int ret;
@@ -9,10 +9,10 @@ typedef struct {
 	volatile unsigned int lock[4];
 # define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
 #endif
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 typedef struct {
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	volatile int counter;
 } raw_rwlock_t;
 
@@ -12,7 +12,7 @@
 #include <asm/atomic.h>
 
 #ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
 	[0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
 };
 #endif
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -73,7 +73,7 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	return arch_spin_trylock(lock) == 0;
@@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
 extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x) barrier()
@@ -104,7 +104,7 @@ extern void __rw_yield(raw_rwlock_t *lock);
 #define SHARED_PROCESSOR 0
 #endif
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
@@ -120,7 +120,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 }
 
 static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
@@ -140,7 +140,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 	}
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	SYNC_IO;
 	__asm__ __volatile__("# __raw_spin_unlock\n\t"
@@ -149,7 +149,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 }
 
 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void __raw_spin_unlock_wait(arch_spinlock_t *lock);
 #else
 #define __raw_spin_unlock_wait(lock) \
 	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
 	return 1;
 }
 
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static u64 timebase = 0;
 
 void __cpuinit rtas_give_timebase(void)
@@ -25,7 +25,7 @@
 #include <asm/smp.h>
 #include <asm/firmware.h>
 
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
 }
 #endif
 
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (lock->slock) {
 		HMT_low();
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
 }
 
 #ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static unsigned long timebase;
 
 static void __devinit pas_give_timebase(void)
@@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned int *lock,
 	do { while (__raw_spin_is_locked(lock)) \
 		 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void _raw_spin_lock_wait(arch_spinlock_t *);
+extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int _raw_spin_trylock_retry(arch_spinlock_t *);
+extern void _raw_spin_relax(arch_spinlock_t *lock);
 
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void __raw_spin_lock(arch_spinlock_t *lp)
 {
 	int old;
 
@@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lp)
 		_raw_spin_lock_wait(lp);
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
 					 unsigned long flags)
 {
 	int old;
@@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
 		_raw_spin_lock_wait_flags(lp, flags);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int __raw_spin_trylock(arch_spinlock_t *lp)
 {
 	int old;
 
@@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 		return _raw_spin_trylock_retry(lp);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void __raw_spin_unlock(arch_spinlock_t *lp)
 {
 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void _raw_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int _raw_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
@@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp)
 }
 EXPORT_SYMBOL(_raw_spin_trylock_retry);
 
-void _raw_spin_relax(raw_spinlock_t *lock)
+void _raw_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
 	if (cpu != 0)
@@ -34,7 +34,7 @@
  *
  * We make no fairness assumptions. They have a cost.
  */
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 	unsigned long oldval;
@@ -54,7 +54,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -67,7 +67,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, oldval;
 
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
 
@@ -15,7 +15,7 @@
 #define __raw_spin_unlock_wait(lock) \
 	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 	"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "g2", "memory", "cc");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int result;
 	__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -27,7 +27,7 @@
 	do { rmb(); \
 	} while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long result;
 
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stb		%%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long tmp1, tmp2;
 
@@ -7,7 +7,7 @@
 
 typedef struct {
 	volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int __raw_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int __raw_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
 #define __raw_spin_is_contended __raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
 			   phys_addr_t phys, pgprot_t flags);
 };
 
-struct raw_spinlock;
+struct arch_spinlock;
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct raw_spinlock *lock);
-	int (*spin_is_contended)(struct raw_spinlock *lock);
-	void (*spin_lock)(struct raw_spinlock *lock);
-	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
-	void (*spin_unlock)(struct raw_spinlock *lock);
+	int (*spin_is_locked)(struct arch_spinlock *lock);
+	int (*spin_is_contended)(struct arch_spinlock *lock);
+	void (*spin_lock)(struct arch_spinlock *lock);
+	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+	int (*spin_trylock)(struct arch_spinlock *lock);
+	void (*spin_unlock)(struct arch_spinlock *lock);
 };
 
 /* This contains all the paravirt structures: we get a convenient
@@ -58,7 +58,7 @@
 #if (NR_CPUS < 256)
 #define TICKET_SHIFT 8
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	: "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, new;
 
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 #else
 #define TICKET_SHIFT 16
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	: "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
@@ -174,33 +174,33 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
 #define __raw_spin_is_contended __raw_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
 	__raw_spin_lock(lock);
@@ -208,7 +208,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (__raw_spin_is_locked(lock))
 		cpu_relax();
@@ -5,9 +5,9 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
 	unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 0 }
 
@@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
@@ -8,7 +8,7 @@
 #include <asm/paravirt.h>
 
 static inline void
-default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
 * we want to have the fastest, inlined, non-debug version
 * of a critical section, to be able to prove TSC time-warps:
 */
-static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
@@ -120,14 +120,14 @@ struct xen_spinlock {
 	unsigned short spinners;	/* count of waiting cpus */
 };
 
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
 	return xl->lock != 0;
 }
 
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
 	return xl->spinners != 0;
 }
 
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
 	__get_cpu_var(lock_spinners) = prev;
 }
 
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
 	return ret;
 }
 
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
 	spin_time_accum_total(start_spin);
 }
 
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
 {
 	__xen_spin_lock(lock, false);
 }
 
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
 {
 	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
 }
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
 	}
 }
 
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
@@ -15,18 +15,18 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
 	__raw_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
 	__raw_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
 
@@ -8,7 +8,7 @@
 *
 * on SMP builds:
 *
-*  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+*  asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
@@ -75,7 +75,7 @@
 #define __lockfunc __attribute__((section(".spinlock.text")))
 
 /*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and raw_rwlock_t definitions:
 */
 #include <linux/spinlock_types.h>
 
@@ -18,7 +18,7 @@
 #include <linux/lockdep.h>
 
 typedef struct {
-	raw_spinlock_t raw_lock;
+	arch_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
@@ -16,13 +16,13 @@
 
 typedef struct {
 	volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
 
 #else
 
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { }
 
@@ -20,19 +20,19 @@
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define __raw_spin_is_locked(x) ((x)->slock == 0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(arch_spinlock_t *lock)
 {
 	char oldval = lock->slock;
 
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return oldval > 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(arch_spinlock_t *lock)
 {
 	lock->slock = 1;
 }
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
 * to use a raw spinlock - we really dont want the spinlock
 * code to recurse back into the lockdep code...
 */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int cpu;
 	struct ring_buffer *buffer;
 	spinlock_t reader_lock;	/* serialize readers */
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	struct lock_class_key lock_key;
 	struct list_head *pages;
 	struct buffer_page *head_page;	/* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
 /* keep prev_time and lock in the same cacheline. */
 static struct {
 	u64 prev_time;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 	{
-		.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+		.lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
 	};
 
 u64 notrace trace_clock_global(void)
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
 static unsigned wakeup_prio = -1;
 static int wakeup_rt;
 
-static raw_spinlock_t wakeup_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
 };
 
 static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
 	lock->owner = SPINLOCK_OWNER_INIT;
 	lock->owner_cpu = -1;