Mirror of https://github.com/adulau/aha.git, synced 2024-12-27 11:16:11 +00:00.
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  clockevents: Convert to raw_spinlock
  clockevents: Make tick_device_lock static
  debugobjects: Convert to raw_spinlocks
  perf_event: Convert to raw_spinlock
  hrtimers: Convert to raw_spinlocks
  genirq: Convert irq_desc.lock to raw_spinlock
  smp: Convert smp locks to raw_spinlocks
  rtmutex: Convert rtmutex.lock to raw_spinlock
  sched: Convert pi_lock to raw_spinlock
  sched: Convert cpupri lock to raw_spinlock
  sched: Convert rt_runtime_lock to raw_spinlock
  sched: Convert rq->lock to raw_spinlock
  plist: Make plist debugging raw_spinlock aware
  bkl: Fixup core_lock fallout
  locking: Cleanup the name space completely
  locking: Further name space cleanups
  alpha: Fix fallout from locking changes
  locking: Implement new raw_spinlock
  locking: Convert raw_rwlock functions to arch_rwlock
  locking: Convert raw_rwlock to arch_rwlock
  ...
commit 8f0ddf91f2
136 changed files with 2485 additions and 2177 deletions
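The whole series follows one mechanical pattern: locks that must keep spinning even on PREEMPT_RT (where ordinary spinlocks can become sleeping locks) move from spinlock_t to raw_spinlock_t, and the arch-level primitives are renamed out of the __raw_*/_raw_* name space into arch_*. A minimal kernel-style sketch of the caller-side conversion (illustrative fragment, not standalone; demo_lock is a made-up name):

/* Before: an ordinary spinlock.  On an RT kernel this may be
 * substituted by a sleeping lock, which low-level code such as the
 * clockevents core or irq_desc handling cannot tolerate. */
static DEFINE_SPINLOCK(demo_lock);

static void demo_old(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* After: a raw spinlock always busy-waits, on RT and non-RT alike. */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);

static void demo_new(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_raw_lock, flags);
	/* ... critical section ... */
	raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}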
@@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr)
 	set_hae(msb); \
 }
 
-extern spinlock_t t2_hae_lock;
+extern raw_spinlock_t t2_hae_lock;
 
 /*
  * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extbl(result, addr & 3);
 }
 
@@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return __kernel_extwl(result, addr & 3);
 }
 
@@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long result, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return result & 0xffffffffUL;
 }
 
@@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long r0, r1, work, msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	r0 = *(vuip)(work);
 	r1 = *(vuip)(work + (4 << 5));
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 	return r1 << 32 | r0;
 }
 
@@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, w;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	w = __kernel_insbl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
@@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, w;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	w = __kernel_inswl(b, addr & 3);
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 /*
@@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
@@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
 	unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
 	unsigned long msb, work;
 	unsigned long flags;
-	spin_lock_irqsave(&t2_hae_lock, flags);
+	raw_spin_lock_irqsave(&t2_hae_lock, flags);
 
 	t2_set_hae;
 
 	work = (addr << 5) + T2_SPARSE_MEM + 0x18;
 	*(vuip)work = b;
 	*(vuip)(work + (4 << 5)) = b >> 32;
-	spin_unlock_irqrestore(&t2_hae_lock, flags);
+	raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
 }
 
 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
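The nine hunks above all guard the Alpha T2 chipset's HAE (high address extension) window: the HAE register selects which slice of sparse space a subsequent access hits, so the window-select write (t2_set_hae) and the data access must sit under one interrupt-safe lock. A standalone C11 model of that invariant (all names here -- hae, window, bank_read -- are hypothetical):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_flag hae_lock = ATOMIC_FLAG_INIT;
static unsigned hae;			/* models the shared HAE register */
static uint8_t window[4][16];		/* models the banked sparse space */

static uint8_t bank_read(unsigned msb, unsigned offset)
{
	uint8_t result;

	while (atomic_flag_test_and_set_explicit(&hae_lock, memory_order_acquire))
		;			/* spin, like raw_spin_lock_irqsave() */
	hae = msb;			/* the t2_set_hae step */
	result = window[hae][offset];	/* the sparse-space access */
	atomic_flag_clear_explicit(&hae_lock, memory_order_release);
	return result;
}

int main(void)
{
	window[2][5] = 0xab;
	printf("0x%02x\n", bank_read(2, 5));	/* prints 0xab */
	return 0;
}

Without the lock covering both steps, another CPU could retarget the window between the HAE write and the read, returning data from the wrong bank.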
@@ -12,18 +12,18 @@
  * We make no fairness assumptions. They have a cost.
  */
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x)	((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x)	((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)
 
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
 {
 	long tmp;
 
@@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return !test_and_set_bit(0, &lock->lock);
 }
 
 /***********************************************************/
 
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (lock->lock & 1) == 0;
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return lock->lock == 0;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
 {
 	long regx;
 
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
 	: "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
 {
 	long regx;
 
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
 	: "m" (*lock) : "memory");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
 {
 	long regx;
 	int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
 	return success;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
 {
 	long regx;
 	int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
 	return success;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
 {
 	long regx;
 	__asm__ __volatile__(
@@ -160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
 	: "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* _ALPHA_SPINLOCK_H */
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
@@ -74,7 +74,7 @@
 # define DBG(args)
 #endif
 
-DEFINE_SPINLOCK(t2_hae_lock);
+DEFINE_RAW_SPINLOCK(t2_hae_lock);
 
 static volatile unsigned int t2_mcheck_any_expected;
 static volatile unsigned int t2_mcheck_last_taken;
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v)
 #endif
 
 	if (irq < ACTUAL_NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[irq].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
 		action = irq_desc[irq].action;
 		if (!action)
 			goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	} else if (irq == ACTUAL_NR_IRQS) {
 #ifdef CONFIG_SMP
 		seq_puts(p, "IPI: ");
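This show_interrupts() shape -- take the descriptor lock with interrupts disabled, walk the action list, print, unlock -- recurs almost verbatim across the per-arch hunks below; only the lock calls change. A condensed kernel-style fragment of the converted pattern (illustrative only; the loop body differs slightly per architecture):

/* Holding irq_desc[i].lock pins the action list while it is printed. */
if (i < NR_IRQS) {
	struct irqaction *action;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq_desc[i].lock, flags);   /* was spin_lock_irqsave() */
	for (action = irq_desc[i].action; action; action = action->next)
		seq_printf(p, ", %s", action->name);
	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); /* was spin_unlock_irqrestore() */
}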
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *);
  */
 #define do_bad_IRQ(irq,desc)			\
 do {						\
-	spin_lock(&desc->lock);			\
+	raw_spin_lock(&desc->lock);		\
 	handle_bad_irq(irq, desc);		\
-	spin_unlock(&desc->lock);		\
+	raw_spin_unlock(&desc->lock);		\
 } while(0)
 
 #endif
@@ -17,13 +17,13 @@
  * Locked value: 1
  */
 
-#define __raw_spin_is_locked(x)		((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x)		((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	smp_mb();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	}
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	smp_mb();
 
@@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * just write zero since the lock is exclusively held.
  */
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	smp_mb();
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	}
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();
 
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x)		((x)->lock == 0)
+#define arch_write_can_lock(x)		((x)->lock == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	smp_mb();
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	: "cc");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2 = 1;
 
@@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 #ifdef CONFIG_FIQ
 		show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 	}
 
 	desc = irq_desc + irq;
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	if (iflags & IRQF_VALID)
 		desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 		desc->status &= ~IRQ_NOPROBE;
 	if (!(iflags & IRQF_NOAUTOEN))
 		desc->status &= ~IRQ_NOAUTOEN;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
 	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 }
 
 /*
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	BUG_ON(desc->status & IRQ_INPROGRESS);
 
@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_mask;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 
@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
 	 * Maybe this function should go to kernel/irq/chip.c? */
 	note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
 	if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
 	/* ack unconditionally to unmask lower prio irqs */
 	desc->chip->ack(irq);
 
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 #define handle_irq handle_prio_irq
 #endif
@@ -42,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -57,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 
 	return 0;
@@ -17,84 +17,84 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __raw_spin_is_locked_asm(&lock->lock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__raw_spin_lock_asm(&lock->lock);
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __raw_spin_trylock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
-static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) > 0;
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
 {
 	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	__raw_read_lock_asm(&rw->lock);
+	arch_read_lock_asm(&rw->lock);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return __raw_read_trylock_asm(&rw->lock);
+	return arch_read_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__raw_read_unlock_asm(&rw->lock);
+	arch_read_unlock_asm(&rw->lock);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	__raw_write_lock_asm(&rw->lock);
+	arch_write_lock_asm(&rw->lock);
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return __raw_write_trylock_asm(&rw->lock);
+	return arch_write_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	__raw_write_unlock_asm(&rw->lock);
+	arch_write_unlock_asm(&rw->lock);
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif
@@ -15,14 +15,14 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
 
 #endif
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)
 
 static struct irq_desc bad_irq_desc = {
 	.handle_irq = handle_bad_irq,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
 };
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	unsigned long flags;
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
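Besides the type rename, the bad_irq_desc hunk above also fixes the static initializer: __SPIN_LOCK_UNLOCKED becomes __RAW_SPIN_LOCK_UNLOCKED, and the name argument changes from irq_desc->lock to bad_irq_desc.lock, i.e. to the variable that actually owns the lock. The same pattern in isolation (illustrative fragment; demo_desc is a made-up name):

static struct irq_desc demo_desc = {
	.handle_irq	= handle_bad_irq,
	/* the argument only names the lock class for lockdep */
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(demo_desc.lock),
};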
@@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp)
 	if (fp->ipend & ~0x3F) {
 		for (i = 0; i < (NR_IRQS - 1); i++) {
 			if (!in_atomic)
-				spin_lock_irqsave(&irq_desc[i].lock, flags);
+				raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 
 			action = irq_desc[i].action;
 			if (!action)
@@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp)
 			verbose_printk("\n");
 unlock:
 			if (!in_atomic)
-				spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 		}
 	}
 
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	return *(volatile signed char *)(&(x)->slock) <= 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ volatile ("move.d %1,%0" \
 			  : "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 			  : "memory");
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	cris_spin_lock((void *)&lock->slock);
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 /*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
  *
  */
 
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
 {
 	return (int)(x)->lock > 0;
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
 {
 	return (x)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock == 0);
 	rw->lock--;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = 0;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	rw->lock++;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	while (rw->lock != RW_LOCK_BIAS);
 	rw->lock = RW_LOCK_BIAS;
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock != 0) {
 		rw->lock--;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return ret;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	int ret = 0;
-	__raw_spin_lock(&rw->slock);
+	arch_spin_lock(&rw->slock);
 	if (rw->lock == RW_LOCK_BIAS) {
 		rw->lock = 0;
 		ret = 1;
 	}
-	__raw_spin_unlock(&rw->slock);
+	arch_spin_unlock(&rw->slock);
 	return 1;
 }
 
 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_ARCH_SPINLOCK_H */
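The CRIS hunks above show another recurring shape in this merge: reader-writer locks built from a guard spinlock plus a counter that starts at RW_LOCK_BIAS -- readers decrement it, a writer claims the whole bias. (Note also that arch_write_trylock() above ends with `return 1;` rather than `return ret;`, a pre-existing quirk this rename-only series leaves untouched.) A standalone C model of the counting scheme, trylock/unlock paths only, with hypothetical helper names:

#include <stdatomic.h>
#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000

static atomic_flag guard = ATOMIC_FLAG_INIT;	/* plays the role of rw->slock */
static int lock_count = RW_LOCK_BIAS;		/* plays the role of rw->lock  */

static void guard_lock(void)   { while (atomic_flag_test_and_set(&guard)) ; }
static void guard_unlock(void) { atomic_flag_clear(&guard); }

static bool read_trylock(void)
{
	bool ok = false;

	guard_lock();
	if (lock_count != 0) {		/* a writer would have taken it to 0 */
		lock_count--;
		ok = true;
	}
	guard_unlock();
	return ok;
}

static void read_unlock(void)
{
	guard_lock();
	lock_count++;
	guard_unlock();
}

static bool write_trylock(void)
{
	bool ok = false;

	guard_lock();
	if (lock_count == RW_LOCK_BIAS) {	/* no readers, no writer */
		lock_count = 0;
		ok = true;
	}
	guard_unlock();
	return ok;
}

static void write_unlock(void)
{
	guard_lock();
	lock_count = RW_LOCK_BIAS;
	guard_unlock();
}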
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 	return 0;
 }
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (action) {
 			seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_putc(p, '\n');
 		}
 
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
 	}
@@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_puts(p, " CPU0");
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
@@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_printf(p, ", %s", action->name);
 		seq_putc(p, '\n');
 unlock:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
 	return 0;
 }
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-#define __raw_spin_lock_init(x)		((x)->lock = 0)
+#define arch_spin_lock_init(x)		((x)->lock = 0)
 
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
 #define TICKET_BITS	15
 #define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket, serve;
 
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	}
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->lock);
 
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
 
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket;
 
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
 	}
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)		(*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw)							\
+#define arch_read_lock(rw)							\
 do {										\
-	raw_rwlock_t *__read_lock_ptr = (rw);					\
+	arch_rwlock_t *__read_lock_ptr = (rw);					\
 										\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
@@ -188,16 +188,16 @@ do {										\
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw)					\
+#define arch_read_unlock(rw)					\
 do {								\
-	raw_rwlock_t *__read_lock_ptr = (rw);			\
+	arch_rwlock_t *__read_lock_ptr = (rw);			\
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
 } while (0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw)						\
+#define arch_write_trylock(rw)						\
 ({									\
 	register long result;						\
 									\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 	(result == 0);							\
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l)							\
+#define arch_write_lock(l)							\
 ({										\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);		\
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l);				\
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	} while (ia64_val);							\
 })
 
-#define __raw_write_trylock(rw)					\
+#define arch_write_trylock(rw)					\
 ({								\
 	__u64 ia64_val;						\
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);		\
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	(ia64_val == 0);					\
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
 	union {
-		raw_rwlock_t lock;
+		arch_rwlock_t lock;
 		__u32 word;
 	} old, new;
 	old.lock = new.lock = *x;
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* _ASM_IA64_SPINLOCK_H */
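The ia64 hunks above rename the ticket-lock implementation wholesale; the idea itself is untouched. One counter hands out tickets, another announces whose turn it is, so waiters acquire in FIFO order, and the lock counts as contended when more than one ticket is outstanding. A standalone C11 sketch of that idea (the ia64 code packs both counters into one word via TICKET_SHIFT/TICKET_MASK; this model keeps them separate for clarity):

#include <stdatomic.h>

struct ticket_lock {
	atomic_uint next;	/* next ticket to hand out       */
	atomic_uint serving;	/* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
	unsigned me = atomic_fetch_add(&l->next, 1);	/* take a ticket */

	while (atomic_load(&l->serving) != me)
		;	/* cpu_relax() in the kernel version */
}

static void ticket_unlock(struct ticket_lock *l)
{
	atomic_fetch_add(&l->serving, 1);	/* call the next ticket */
}

static int ticket_is_contended(struct ticket_lock *l)
{
	/* more than one outstanding ticket means someone is waiting */
	return atomic_load(&l->next) - atomic_load(&l->serving) > 1;
}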
@@ -7,15 +7,15 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int read_counter	: 31;
 	volatile unsigned int write_lock	: 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0, 0 }
 
 #endif
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
 		goto unlock_iosapic_lock;
 	}
 
-	spin_lock(&irq_desc[irq].lock);
+	raw_spin_lock(&irq_desc[irq].lock);
 	dest = get_target_cpu(gsi, irq);
 	dmode = choose_dmode();
 	err = register_intr(gsi, irq, dmode, polarity, trigger);
 	if (err < 0) {
-		spin_unlock(&irq_desc[irq].lock);
+		raw_spin_unlock(&irq_desc[irq].lock);
 		irq = err;
 		goto unlock_iosapic_lock;
 	}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
 
-	spin_unlock(&irq_desc[irq].lock);
+	raw_spin_unlock(&irq_desc[irq].lock);
  unlock_iosapic_lock:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 	return irq;
@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < NR_IRQS) {
|
if (i < NR_IRQS) {
|
||||||
spin_lock_irqsave(&irq_desc[i].lock, flags);
|
raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
|
||||||
action = irq_desc[i].action;
|
action = irq_desc[i].action;
|
||||||
if (!action)
|
if (!action)
|
||||||
goto skip;
|
goto skip;
|
||||||
|
@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||||
|
|
||||||
seq_putc(p, '\n');
|
seq_putc(p, '\n');
|
||||||
skip:
|
skip:
|
||||||
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
||||||
} else if (i == NR_IRQS)
|
} else if (i == NR_IRQS)
|
||||||
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
|
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
|
||||||
|
|
||||||
desc = irq_desc + irq;
|
desc = irq_desc + irq;
|
||||||
cfg = irq_cfg + irq;
|
cfg = irq_cfg + irq;
|
||||||
spin_lock(&desc->lock);
|
raw_spin_lock(&desc->lock);
|
||||||
if (!cfg->move_cleanup_count)
|
if (!cfg->move_cleanup_count)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
|
||||||
|
@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
|
||||||
spin_unlock_irqrestore(&vector_lock, flags);
|
spin_unlock_irqrestore(&vector_lock, flags);
|
||||||
cfg->move_cleanup_count--;
|
cfg->move_cleanup_count--;
|
||||||
unlock:
|
unlock:
|
||||||
spin_unlock(&desc->lock);
|
raw_spin_unlock(&desc->lock);
|
||||||
}
|
}
|
||||||
return IRQ_HANDLED;
|
return IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
|
|
|
@ -24,19 +24,19 @@
|
||||||
* We make no fairness assumptions. They have a cost.
|
* We make no fairness assumptions. They have a cost.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
|
#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
|
||||||
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
|
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
|
||||||
#define __raw_spin_unlock_wait(x) \
|
#define arch_spin_unlock_wait(x) \
|
||||||
do { cpu_relax(); } while (__raw_spin_is_locked(x))
|
do { cpu_relax(); } while (arch_spin_is_locked(x))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __raw_spin_trylock - Try spin lock and return a result
|
* arch_spin_trylock - Try spin lock and return a result
|
||||||
* @lock: Pointer to the lock variable
|
* @lock: Pointer to the lock variable
|
||||||
*
|
*
|
||||||
* __raw_spin_trylock() tries to get the lock and returns a result.
|
* arch_spin_trylock() tries to get the lock and returns a result.
|
||||||
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
|
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
|
||||||
*/
|
*/
|
||||||
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
|
static inline int arch_spin_trylock(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
int oldval;
|
int oldval;
|
||||||
unsigned long tmp1, tmp2;
|
unsigned long tmp1, tmp2;
|
||||||
|
@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
|
||||||
* }
|
* }
|
||||||
*/
|
*/
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"# __raw_spin_trylock \n\t"
|
"# arch_spin_trylock \n\t"
|
||||||
"ldi %1, #0; \n\t"
|
"ldi %1, #0; \n\t"
|
||||||
"mvfc %2, psw; \n\t"
|
"mvfc %2, psw; \n\t"
|
||||||
"clrpsw #0x40 -> nop; \n\t"
|
"clrpsw #0x40 -> nop; \n\t"
|
||||||
|
@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
|
||||||
return (oldval > 0);
|
return (oldval > 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_spin_lock(raw_spinlock_t *lock)
|
static inline void arch_spin_lock(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
unsigned long tmp0, tmp1;
|
unsigned long tmp0, tmp1;
|
||||||
|
|
||||||
|
@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
|
||||||
* }
|
* }
|
||||||
*/
|
*/
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"# __raw_spin_lock \n\t"
|
"# arch_spin_lock \n\t"
|
||||||
".fillinsn \n"
|
".fillinsn \n"
|
||||||
"1: \n\t"
|
"1: \n\t"
|
||||||
"mvfc %1, psw; \n\t"
|
"mvfc %1, psw; \n\t"
|
||||||
|
@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
|
static inline void arch_spin_unlock(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
mb();
|
mb();
|
||||||
lock->slock = 1;
|
lock->slock = 1;
|
||||||
|
@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
|
||||||
* read_can_lock - would read_trylock() succeed?
|
* read_can_lock - would read_trylock() succeed?
|
||||||
* @lock: the rwlock in question.
|
* @lock: the rwlock in question.
|
||||||
*/
|
*/
|
||||||
#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
|
#define arch_read_can_lock(x) ((int)(x)->lock > 0)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* write_can_lock - would write_trylock() succeed?
|
* write_can_lock - would write_trylock() succeed?
|
||||||
* @lock: the rwlock in question.
|
* @lock: the rwlock in question.
|
||||||
*/
|
*/
|
||||||
#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
|
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
|
||||||
|
|
||||||
static inline void __raw_read_lock(raw_rwlock_t *rw)
|
static inline void arch_read_lock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long tmp0, tmp1;
|
unsigned long tmp0, tmp1;
|
||||||
|
|
||||||
|
@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_write_lock(raw_rwlock_t *rw)
|
static inline void arch_write_lock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long tmp0, tmp1, tmp2;
|
unsigned long tmp0, tmp1, tmp2;
|
||||||
|
|
||||||
|
@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_read_unlock(raw_rwlock_t *rw)
|
static inline void arch_read_unlock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long tmp0, tmp1;
|
unsigned long tmp0, tmp1;
|
||||||
|
|
||||||
|
@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_write_unlock(raw_rwlock_t *rw)
|
static inline void arch_write_unlock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned long tmp0, tmp1, tmp2;
|
unsigned long tmp0, tmp1, tmp2;
|
||||||
|
|
||||||
|
@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __raw_read_trylock(raw_rwlock_t *lock)
|
static inline int arch_read_trylock(arch_rwlock_t *lock)
|
||||||
{
|
{
|
||||||
atomic_t *count = (atomic_t*)lock;
|
atomic_t *count = (atomic_t*)lock;
|
||||||
if (atomic_dec_return(count) >= 0)
|
if (atomic_dec_return(count) >= 0)
|
||||||
|
@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __raw_write_trylock(raw_rwlock_t *lock)
|
static inline int arch_write_trylock(arch_rwlock_t *lock)
|
||||||
{
|
{
|
||||||
atomic_t *count = (atomic_t *)lock;
|
atomic_t *count = (atomic_t *)lock;
|
||||||
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
|
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
|
||||||
|
@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
|
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
|
||||||
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
|
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
|
||||||
|
|
||||||
#define _raw_spin_relax(lock) cpu_relax()
|
#define arch_spin_relax(lock) cpu_relax()
|
||||||
#define _raw_read_relax(lock) cpu_relax()
|
#define arch_read_relax(lock) cpu_relax()
|
||||||
#define _raw_write_relax(lock) cpu_relax()
|
#define arch_write_relax(lock) cpu_relax()
|
||||||
|
|
||||||
#endif /* _ASM_M32R_SPINLOCK_H */
|
#endif /* _ASM_M32R_SPINLOCK_H */
|
||||||
|
|
|
@ -7,17 +7,17 @@
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
volatile int slock;
|
volatile int slock;
|
||||||
} raw_spinlock_t;
|
} arch_spinlock_t;
|
||||||
|
|
||||||
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
|
#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
volatile int lock;
|
volatile int lock;
|
||||||
} raw_rwlock_t;
|
} arch_rwlock_t;
|
||||||
|
|
||||||
#define RW_LOCK_BIAS 0x01000000
|
#define RW_LOCK_BIAS 0x01000000
|
||||||
#define RW_LOCK_BIAS_STR "0x01000000"
|
#define RW_LOCK_BIAS_STR "0x01000000"
|
||||||
|
|
||||||
#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
|
#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
|
||||||
|
|
||||||
#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
|
#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
|
||||||
|
|
|
@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < NR_IRQS) {
|
if (i < NR_IRQS) {
|
||||||
spin_lock_irqsave(&irq_desc[i].lock, flags);
|
raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
|
||||||
action = irq_desc[i].action;
|
action = irq_desc[i].action;
|
||||||
if (!action)
|
if (!action)
|
||||||
goto skip;
|
goto skip;
|
||||||
|
@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||||
|
|
||||||
seq_putc(p, '\n');
|
seq_putc(p, '\n');
|
||||||
skip:
|
skip:
|
||||||
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i < nr_irq) {
|
if (i < nr_irq) {
|
||||||
spin_lock_irqsave(&irq_desc[i].lock, flags);
|
raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
|
||||||
action = irq_desc[i].action;
|
action = irq_desc[i].action;
|
||||||
if (!action)
|
if (!action)
|
||||||
goto skip;
|
goto skip;
|
||||||
|
@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||||
|
|
||||||
seq_putc(p, '\n');
|
seq_putc(p, '\n');
|
||||||
skip:
|
skip:
|
||||||
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -34,33 +34,33 @@
|
||||||
* becomes equal to the the initial value of the tail.
|
* becomes equal to the the initial value of the tail.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
|
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
unsigned int counters = ACCESS_ONCE(lock->lock);
|
unsigned int counters = ACCESS_ONCE(lock->lock);
|
||||||
|
|
||||||
return ((counters >> 14) ^ counters) & 0x1fff;
|
return ((counters >> 14) ^ counters) & 0x1fff;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
|
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
|
||||||
#define __raw_spin_unlock_wait(x) \
|
#define arch_spin_unlock_wait(x) \
|
||||||
while (__raw_spin_is_locked(x)) { cpu_relax(); }
|
while (arch_spin_is_locked(x)) { cpu_relax(); }
|
||||||
|
|
||||||
static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
|
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
unsigned int counters = ACCESS_ONCE(lock->lock);
|
unsigned int counters = ACCESS_ONCE(lock->lock);
|
||||||
|
|
||||||
return (((counters >> 14) - counters) & 0x1fff) > 1;
|
return (((counters >> 14) - counters) & 0x1fff) > 1;
|
||||||
}
|
}
|
||||||
#define __raw_spin_is_contended __raw_spin_is_contended
|
#define arch_spin_is_contended arch_spin_is_contended
|
||||||
|
|
||||||
static inline void __raw_spin_lock(raw_spinlock_t *lock)
|
static inline void arch_spin_lock(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
int my_ticket;
|
int my_ticket;
|
||||||
int tmp;
|
int tmp;
|
||||||
|
|
||||||
if (R10000_LLSC_WAR) {
|
if (R10000_LLSC_WAR) {
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
" .set push # __raw_spin_lock \n"
|
" .set push # arch_spin_lock \n"
|
||||||
" .set noreorder \n"
|
" .set noreorder \n"
|
||||||
" \n"
|
" \n"
|
||||||
"1: ll %[ticket], %[ticket_ptr] \n"
|
"1: ll %[ticket], %[ticket_ptr] \n"
|
||||||
|
@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
|
||||||
[my_ticket] "=&r" (my_ticket));
|
[my_ticket] "=&r" (my_ticket));
|
||||||
} else {
|
} else {
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
" .set push # __raw_spin_lock \n"
|
" .set push # arch_spin_lock \n"
|
||||||
" .set noreorder \n"
|
" .set noreorder \n"
|
||||||
" \n"
|
" \n"
|
||||||
" ll %[ticket], %[ticket_ptr] \n"
|
" ll %[ticket], %[ticket_ptr] \n"
|
||||||
|
@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
|
||||||
smp_llsc_mb();
|
smp_llsc_mb();
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
|
static inline void arch_spin_unlock(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
int tmp;
|
int tmp;
|
||||||
|
|
||||||
|
@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
|
||||||
|
|
||||||
if (R10000_LLSC_WAR) {
|
if (R10000_LLSC_WAR) {
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
" # __raw_spin_unlock \n"
|
" # arch_spin_unlock \n"
|
||||||
"1: ll %[ticket], %[ticket_ptr] \n"
|
"1: ll %[ticket], %[ticket_ptr] \n"
|
||||||
" addiu %[ticket], %[ticket], 1 \n"
|
" addiu %[ticket], %[ticket], 1 \n"
|
||||||
" ori %[ticket], %[ticket], 0x2000 \n"
|
" ori %[ticket], %[ticket], 0x2000 \n"
|
||||||
|
@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
|
||||||
[ticket] "=&r" (tmp));
|
[ticket] "=&r" (tmp));
|
||||||
} else {
|
} else {
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
" .set push # __raw_spin_unlock \n"
|
" .set push # arch_spin_unlock \n"
|
||||||
" .set noreorder \n"
|
" .set noreorder \n"
|
||||||
" \n"
|
" \n"
|
||||||
" ll %[ticket], %[ticket_ptr] \n"
|
" ll %[ticket], %[ticket_ptr] \n"
|
||||||
|
@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
|
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
|
||||||
{
|
{
|
||||||
int tmp, tmp2, tmp3;
|
int tmp, tmp2, tmp3;
|
||||||
|
|
||||||
if (R10000_LLSC_WAR) {
|
if (R10000_LLSC_WAR) {
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
" .set push # __raw_spin_trylock \n"
|
" .set push # arch_spin_trylock \n"
|
||||||
" .set noreorder \n"
|
" .set noreorder \n"
|
||||||
" \n"
|
" \n"
|
||||||
"1: ll %[ticket], %[ticket_ptr] \n"
|
"1: ll %[ticket], %[ticket_ptr] \n"
|
||||||
|
@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
|
||||||
[now_serving] "=&r" (tmp3));
|
[now_serving] "=&r" (tmp3));
|
||||||
} else {
|
} else {
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
" .set push # __raw_spin_trylock \n"
|
" .set push # arch_spin_trylock \n"
|
||||||
" .set noreorder \n"
|
" .set noreorder \n"
|
||||||
" \n"
|
" \n"
|
||||||
" ll %[ticket], %[ticket_ptr] \n"
|
" ll %[ticket], %[ticket_ptr] \n"
|
||||||
|
@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
|
||||||
* read_can_lock - would read_trylock() succeed?
|
* read_can_lock - would read_trylock() succeed?
|
||||||
* @lock: the rwlock in question.
|
* @lock: the rwlock in question.
|
||||||
*/
|
*/
|
||||||
#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
|
#define arch_read_can_lock(rw) ((rw)->lock >= 0)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* write_can_lock - would write_trylock() succeed?
|
* write_can_lock - would write_trylock() succeed?
|
||||||
* @lock: the rwlock in question.
|
* @lock: the rwlock in question.
|
||||||
*/
|
*/
|
||||||
#define __raw_write_can_lock(rw) (!(rw)->lock)
|
#define arch_write_can_lock(rw) (!(rw)->lock)
|
||||||
|
|
||||||
static inline void __raw_read_lock(raw_rwlock_t *rw)
|
static inline void arch_read_lock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned int tmp;
|
unsigned int tmp;
|
||||||
|
|
||||||
if (R10000_LLSC_WAR) {
|
if (R10000_LLSC_WAR) {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_read_lock \n"
|
" .set noreorder # arch_read_lock \n"
|
||||||
"1: ll %1, %2 \n"
|
"1: ll %1, %2 \n"
|
||||||
" bltz %1, 1b \n"
|
" bltz %1, 1b \n"
|
||||||
" addu %1, 1 \n"
|
" addu %1, 1 \n"
|
||||||
|
@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
|
||||||
: "memory");
|
: "memory");
|
||||||
} else {
|
} else {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_read_lock \n"
|
" .set noreorder # arch_read_lock \n"
|
||||||
"1: ll %1, %2 \n"
|
"1: ll %1, %2 \n"
|
||||||
" bltz %1, 2f \n"
|
" bltz %1, 2f \n"
|
||||||
" addu %1, 1 \n"
|
" addu %1, 1 \n"
|
||||||
|
@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
|
||||||
/* Note the use of sub, not subu which will make the kernel die with an
|
/* Note the use of sub, not subu which will make the kernel die with an
|
||||||
overflow exception if we ever try to unlock an rwlock that is already
|
overflow exception if we ever try to unlock an rwlock that is already
|
||||||
unlocked or is being held by a writer. */
|
unlocked or is being held by a writer. */
|
||||||
static inline void __raw_read_unlock(raw_rwlock_t *rw)
|
static inline void arch_read_unlock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned int tmp;
|
unsigned int tmp;
|
||||||
|
|
||||||
|
@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
|
||||||
|
|
||||||
if (R10000_LLSC_WAR) {
|
if (R10000_LLSC_WAR) {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
"1: ll %1, %2 # __raw_read_unlock \n"
|
"1: ll %1, %2 # arch_read_unlock \n"
|
||||||
" sub %1, 1 \n"
|
" sub %1, 1 \n"
|
||||||
" sc %1, %0 \n"
|
" sc %1, %0 \n"
|
||||||
" beqzl %1, 1b \n"
|
" beqzl %1, 1b \n"
|
||||||
|
@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
|
||||||
: "memory");
|
: "memory");
|
||||||
} else {
|
} else {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_read_unlock \n"
|
" .set noreorder # arch_read_unlock \n"
|
||||||
"1: ll %1, %2 \n"
|
"1: ll %1, %2 \n"
|
||||||
" sub %1, 1 \n"
|
" sub %1, 1 \n"
|
||||||
" sc %1, %0 \n"
|
" sc %1, %0 \n"
|
||||||
|
@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_write_lock(raw_rwlock_t *rw)
|
static inline void arch_write_lock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned int tmp;
|
unsigned int tmp;
|
||||||
|
|
||||||
if (R10000_LLSC_WAR) {
|
if (R10000_LLSC_WAR) {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_write_lock \n"
|
" .set noreorder # arch_write_lock \n"
|
||||||
"1: ll %1, %2 \n"
|
"1: ll %1, %2 \n"
|
||||||
" bnez %1, 1b \n"
|
" bnez %1, 1b \n"
|
||||||
" lui %1, 0x8000 \n"
|
" lui %1, 0x8000 \n"
|
||||||
|
@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
|
||||||
: "memory");
|
: "memory");
|
||||||
} else {
|
} else {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_write_lock \n"
|
" .set noreorder # arch_write_lock \n"
|
||||||
"1: ll %1, %2 \n"
|
"1: ll %1, %2 \n"
|
||||||
" bnez %1, 2f \n"
|
" bnez %1, 2f \n"
|
||||||
" lui %1, 0x8000 \n"
|
" lui %1, 0x8000 \n"
|
||||||
|
@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
|
||||||
smp_llsc_mb();
|
smp_llsc_mb();
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_write_unlock(raw_rwlock_t *rw)
|
static inline void arch_write_unlock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
smp_mb();
|
smp_mb();
|
||||||
|
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" # __raw_write_unlock \n"
|
" # arch_write_unlock \n"
|
||||||
" sw $0, %0 \n"
|
" sw $0, %0 \n"
|
||||||
: "=m" (rw->lock)
|
: "=m" (rw->lock)
|
||||||
: "m" (rw->lock)
|
: "m" (rw->lock)
|
||||||
: "memory");
|
: "memory");
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __raw_read_trylock(raw_rwlock_t *rw)
|
static inline int arch_read_trylock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned int tmp;
|
unsigned int tmp;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (R10000_LLSC_WAR) {
|
if (R10000_LLSC_WAR) {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_read_trylock \n"
|
" .set noreorder # arch_read_trylock \n"
|
||||||
" li %2, 0 \n"
|
" li %2, 0 \n"
|
||||||
"1: ll %1, %3 \n"
|
"1: ll %1, %3 \n"
|
||||||
" bltz %1, 2f \n"
|
" bltz %1, 2f \n"
|
||||||
|
@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
|
||||||
: "memory");
|
: "memory");
|
||||||
} else {
|
} else {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_read_trylock \n"
|
" .set noreorder # arch_read_trylock \n"
|
||||||
" li %2, 0 \n"
|
" li %2, 0 \n"
|
||||||
"1: ll %1, %3 \n"
|
"1: ll %1, %3 \n"
|
||||||
" bltz %1, 2f \n"
|
" bltz %1, 2f \n"
|
||||||
|
@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __raw_write_trylock(raw_rwlock_t *rw)
|
static inline int arch_write_trylock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
unsigned int tmp;
|
unsigned int tmp;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (R10000_LLSC_WAR) {
|
if (R10000_LLSC_WAR) {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_write_trylock \n"
|
" .set noreorder # arch_write_trylock \n"
|
||||||
" li %2, 0 \n"
|
" li %2, 0 \n"
|
||||||
"1: ll %1, %3 \n"
|
"1: ll %1, %3 \n"
|
||||||
" bnez %1, 2f \n"
|
" bnez %1, 2f \n"
|
||||||
|
@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
|
||||||
: "memory");
|
: "memory");
|
||||||
} else {
|
} else {
|
||||||
__asm__ __volatile__(
|
__asm__ __volatile__(
|
||||||
" .set noreorder # __raw_write_trylock \n"
|
" .set noreorder # arch_write_trylock \n"
|
||||||
" li %2, 0 \n"
|
" li %2, 0 \n"
|
||||||
"1: ll %1, %3 \n"
|
"1: ll %1, %3 \n"
|
||||||
" bnez %1, 2f \n"
|
" bnez %1, 2f \n"
|
||||||
|
@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
|
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
|
||||||
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
|
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
|
||||||
|
|
||||||
#define _raw_spin_relax(lock) cpu_relax()
|
#define arch_spin_relax(lock) cpu_relax()
|
||||||
#define _raw_read_relax(lock) cpu_relax()
|
#define arch_read_relax(lock) cpu_relax()
|
||||||
#define _raw_write_relax(lock) cpu_relax()
|
#define arch_write_relax(lock) cpu_relax()
|
||||||
|
|
||||||
#endif /* _ASM_SPINLOCK_H */
|
#endif /* _ASM_SPINLOCK_H */
|
||||||
|
|
|
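The MIPS ll/sc sequences above all follow the same shape: load-linked, test, modify, store-conditional, retry on failure. A portable C11 sketch of the read-lock test ("add a reader unless the writer's sign bit is set"), assuming a compare-exchange loop is an acceptable stand-in for ll/sc; the demo_* names are invented for illustration:

/*
 * C11 model of the "increment unless negative" retry loop used by
 * arch_read_trylock above.  Illustration only, not the kernel code.
 */
#include <stdatomic.h>
#include <limits.h>
#include <stdio.h>

static int demo_read_trylock(atomic_int *lock)
{
	int old = atomic_load_explicit(lock, memory_order_relaxed);

	do {
		if (old < 0)		/* writer holds the sign bit */
			return 0;
		/* try to install old + 1; on failure, 'old' is reloaded */
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return 1;
}

int main(void)
{
	atomic_int lock = 0;
	printf("reader:  %d\n", demo_read_trylock(&lock));	/* 1 */
	atomic_store(&lock, INT_MIN);				/* writer bit */
	printf("blocked: %d\n", demo_read_trylock(&lock));	/* 0 */
	return 0;
}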
@@ -12,14 +12,14 @@ typedef struct {
  * bits 15..28: ticket
  */
 	unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;

-#define __RAW_RW_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }

 #endif
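This ticket layout is what the earlier arch_spin_is_locked/arch_spin_is_contended tests decode: the lock is held while the ticket and now-serving counts differ. A worked example of that arithmetic (plain C with illustrative values, not kernel code):

#include <stdio.h>

static int is_locked(unsigned int counters)
{
	/* Held while the ticket and serving counts differ. */
	return ((counters >> 14) ^ counters) & 0x1fff;
}

static int is_contended(unsigned int counters)
{
	/* More than one outstanding ticket means someone is waiting. */
	return (((counters >> 14) - counters) & 0x1fff) > 1;
}

int main(void)
{
	unsigned int idle    = (5u << 14) | 5;	/* ticket == serving */
	unsigned int held    = (6u << 14) | 5;	/* one holder, no waiters */
	unsigned int crowded = (9u << 14) | 5;	/* holder plus waiters */

	printf("idle:    locked=%d contended=%d\n", is_locked(idle), is_contended(idle));
	printf("held:    locked=%d contended=%d\n", is_locked(held), is_contended(held));
	printf("crowded: locked=%d contended=%d\n", is_locked(crowded), is_contended(crowded));
	return 0;
}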
@@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}

 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_putc(p, '\n');
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));

@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)

 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MPIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask)

 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MPIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)

 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MAIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask)

 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MAIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)

 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MKIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask)

 	if (current_cpu_type() == CPU_VR4111 ||
 	    current_cpu_type() == CPU_VR4121) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MKIUINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
 	unsigned long flags;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu1_set(MMACINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

 EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
 	unsigned long flags;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu1_clear(MMACINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

 EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + DSIU_IRQ;
 	unsigned long flags;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu1_set(MDSIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

 EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + DSIU_IRQ;
 	unsigned long flags;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu1_clear(MDSIUINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

 EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + FIR_IRQ;
 	unsigned long flags;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu2_set(MFIRINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

 EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + FIR_IRQ;
 	unsigned long flags;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	icu2_clear(MFIRINTREG, mask);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

 EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MPCIINTREG, PCIINT0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MPCIINTREG, 0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MSCUINTREG, SCUINT0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MSCUINTREG, 0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_set(MCSIINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_clear(MCSIINTREG, mask);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MBCUINTREG, BCUINTR);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
 	if (current_cpu_type() == CPU_VR4122 ||
 	    current_cpu_type() == CPU_VR4131 ||
 	    current_cpu_type() == CPU_VR4133) {
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MBCUINTREG, 0);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }

@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)

 	pin = SYSINT1_IRQ_TO_PIN(irq);

-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);

 	intassign0 = icu1_read(INTASSIGN0);
 	intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
 		intassign1 |= (uint16_t)assign << 9;
 		break;
 	default:
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 		return -EINVAL;
 	}

@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
 	icu1_write(INTASSIGN0, intassign0);
 	icu1_write(INTASSIGN1, intassign1);

-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);

 	return 0;
 }
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)

 	pin = SYSINT2_IRQ_TO_PIN(irq);

-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);

 	intassign2 = icu1_read(INTASSIGN2);
 	intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
 		intassign3 |= (uint16_t)assign << 12;
 		break;
 	default:
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 		return -EINVAL;
 	}

@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
 	icu1_write(INTASSIGN2, intassign2);
 	icu1_write(INTASSIGN3, intassign3);

-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);

 	return 0;
 }

@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		/* display information rows, one per active CPU */
 	case 1 ... NR_IRQS - 1:
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

 		action = irq_desc[i].action;
 		if (action) {
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_putc(p, '\n');
 		}

-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 		break;

 		/* polish off with NMI and error counters */
@@ -27,19 +27,19 @@
 #  define ATOMIC_HASH_SIZE 4
 #  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
-	__raw_spin_lock(s);			\
+	arch_spin_lock(s);			\
 } while(0)

 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
-	__raw_spin_unlock(s);				\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)

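The hash above spreads atomic operations across a small array of locks keyed by the target address's cache line, so unrelated words rarely contend for the same lock. A sketch of the idea with pthread mutexes standing in for arch_spinlock_t (constants and names are illustrative, not the kernel's):

/* Hashed-lock model of parisc's software atomics.  Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>

#define L1_CACHE_BYTES   64
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) \
	(&atomic_hash[(((unsigned long)(a)) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)])

static pthread_mutex_t atomic_hash[ATOMIC_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static long demo_atomic_add_return(long i, long *v)
{
	pthread_mutex_t *s = ATOMIC_HASH(v);	/* all racers on v pick the same lock */
	long ret;

	pthread_mutex_lock(s);
	ret = (*v += i);
	pthread_mutex_unlock(s);
	return ret;
}

int main(void)
{
	long counter = 0;
	printf("%ld\n", demo_atomic_add_return(5, &counter));	/* 5 */
	return 0;
}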
@@ -5,17 +5,17 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>

-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	return *a == 0;
 }

-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
-		do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+		do { cpu_relax(); } while (arch_spin_is_locked(x))

-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 					 unsigned long flags)
 {
 	volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
 	mb();
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
 	mb();
 }

-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 	int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static  __inline__ void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);
 	rw->counter++;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static  __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static  __inline__ void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);
 	rw->counter--;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 	local_irq_restore(flags);
 }

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 retry:
 	local_irq_save(flags);
-	if (__raw_spin_trylock(&rw->lock)) {
+	if (arch_spin_trylock(&rw->lock)) {
 		rw->counter++;
-		__raw_spin_unlock(&rw->lock);
+		arch_spin_unlock(&rw->lock);
 		local_irq_restore(flags);
 		return 1;
 	}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
 	return 0;

 	/* Wait until we have a realistic chance at the lock */
-	while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
 		cpu_relax();

 	goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 retry:
 	local_irq_save(flags);
-	__raw_spin_lock_flags(&rw->lock, flags);
+	arch_spin_lock_flags(&rw->lock, flags);

 	if (rw->counter != 0) {
-		__raw_spin_unlock(&rw->lock);
+		arch_spin_unlock(&rw->lock);
 		local_irq_restore(flags);

 		while (rw->counter != 0)
@@ -141,27 +141,27 @@ retry:
 	local_irq_restore(flags);
 }

-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
 {
 	rw->counter = 0;
-	__raw_spin_unlock(&rw->lock);
+	arch_spin_unlock(&rw->lock);
 }

 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
 	int result = 0;

 	local_irq_save(flags);
-	if (__raw_spin_trylock(&rw->lock)) {
+	if (arch_spin_trylock(&rw->lock)) {
 		if (rw->counter == 0) {
 			rw->counter = -1;
 			result = 1;
 		} else {
 			/* Read-locked.  Oh well. */
-			__raw_spin_unlock(&rw->lock);
+			arch_spin_unlock(&rw->lock);
 		}
 	}
 	local_irq_restore(flags);
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
 {
 	return rw->counter >= 0;
 }
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 {
 	return !rw->counter;
 }

-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()

 #endif /* __ASM_SPINLOCK_H */
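The parisc rwlock above is just a counter guarded by a spinlock: readers increment it under the lock, while a writer parks it at -1 and keeps holding the inner lock for the duration of its critical section. A compact pthread model of that scheme (demo_* names are invented; an illustration, not the kernel implementation; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t lock;	/* stands in for arch_spinlock_t */
	int counter;		/* >0: readers, 0: free, -1: writer */
} demo_rwlock_t;

static void demo_read_lock(demo_rwlock_t *rw)
{
	pthread_mutex_lock(&rw->lock);	/* also waits out any writer */
	rw->counter++;
	pthread_mutex_unlock(&rw->lock);
}

static void demo_read_unlock(demo_rwlock_t *rw)
{
	pthread_mutex_lock(&rw->lock);
	rw->counter--;
	pthread_mutex_unlock(&rw->lock);
}

static int demo_write_trylock(demo_rwlock_t *rw)
{
	pthread_mutex_lock(&rw->lock);
	if (rw->counter == 0) {
		rw->counter = -1;	/* exclusive; keep the inner lock */
		return 1;
	}
	/* Read-locked.  Oh well. */
	pthread_mutex_unlock(&rw->lock);
	return 0;
}

static void demo_write_unlock(demo_rwlock_t *rw)
{
	rw->counter = 0;
	pthread_mutex_unlock(&rw->lock);
}

int main(void)
{
	demo_rwlock_t rw = { PTHREAD_MUTEX_INITIALIZER, 0 };

	demo_read_lock(&rw);
	printf("write while read-held: %d\n", demo_write_trylock(&rw));	/* 0 */
	demo_read_unlock(&rw);
	printf("write when free:       %d\n", demo_write_trylock(&rw));	/* 1 */
	demo_write_unlock(&rw);
	return 0;
}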
@@ -4,18 +4,18 @@
 typedef struct {
 #ifdef CONFIG_PA20
 	volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 #else
 	volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
 #endif
-} raw_spinlock_t;
+} arch_spinlock_t;

 typedef struct {
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;

-#define __RAW_RW_LOCK_UNLOCKED		{ __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ __ARCH_SPIN_LOCK_UNLOCKED, 0 }

 #endif

@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (i < NR_IRQS) {
 		struct irqaction *action;

-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)

 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}

 	return 0;

@@ -12,8 +12,8 @@
 #include <asm/atomic.h>

 #ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+	[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
 };
 #endif


@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	raw_spinlock_t lock;
+	arch_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };
@@ -28,7 +28,7 @@
 #include <asm/asm-compat.h>
 #include <asm/synch.h>

-#define __raw_spin_is_locked(x)		((x)->slock != 0)
+#define arch_spin_is_locked(x)		((x)->slock != 0)

 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;

@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return arch_spin_trylock(lock) == 0;
+	return __arch_spin_trylock(lock) == 0;
 }
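The powerpc convention here is that the low-level helper returns the *old* lock word, so zero means the caller took the lock. A portable C11 sketch of that return-the-old-value style (illustrative names and token value, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static unsigned long demo_trylock_raw(atomic_ulong *lock, unsigned long token)
{
	unsigned long expected = 0;

	/* On success the old value was 0; on failure 'expected' holds it. */
	if (atomic_compare_exchange_strong_explicit(lock, &expected, token,
						    memory_order_acquire,
						    memory_order_relaxed))
		return 0;
	return expected;
}

static int demo_trylock(atomic_ulong *lock)
{
	return demo_trylock_raw(lock, 0x80000001UL) == 0;	/* old 0 == got it */
}

int main(void)
{
	atomic_ulong lock = 0;
	printf("first:  %d\n", demo_trylock(&lock));	/* 1 */
	printf("second: %d\n", demo_trylock(&lock));	/* 0 */
	return 0;
}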

 /*
@@ -96,19 +96,19 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
 #define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
 #define __spin_yield(x)	barrier()
 #define __rw_yield(x)	barrier()
 #define SHARED_PROCESSOR	0
 #endif

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(arch_spin_trylock(lock) == 0))
+		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 }

 static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;

 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(arch_spin_trylock(lock) == 0))
+		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 	}
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	SYNC_IO;
-	__asm__ __volatile__("# __raw_spin_unlock\n\t"
+	__asm__ __volatile__("# arch_spin_unlock\n\t"
 				LWSYNC_ON_SMP: : :"memory");
 	lock->slock = 0;
 }

 #ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
 #else
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 #endif

 /*
@@ -166,8 +166,8 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
  * read-locks.
  */

-#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_read_can_lock(rw)		((rw)->lock >= 0)
+#define arch_write_can_lock(rw)		(!(rw)->lock)

 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND	"extsw %0,%0\n"
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;

@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw)
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
|
* so we got the write lock if the return value is 0.
|
||||||
*/
|
*/
|
||||||
static inline long arch_write_trylock(raw_rwlock_t *rw)
|
static inline long __arch_write_trylock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
long tmp, token;
|
long tmp, token;
|
||||||
|
|
||||||
|
@ -225,10 +225,10 @@ static inline long arch_write_trylock(raw_rwlock_t *rw)
|
||||||
return tmp;
|
return tmp;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_read_lock(raw_rwlock_t *rw)
|
static inline void arch_read_lock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
while (1) {
|
while (1) {
|
||||||
if (likely(arch_read_trylock(rw) > 0))
|
if (likely(__arch_read_trylock(rw) > 0))
|
||||||
break;
|
break;
|
||||||
do {
|
do {
|
||||||
HMT_low();
|
HMT_low();
|
||||||
|
@ -239,10 +239,10 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_write_lock(raw_rwlock_t *rw)
|
static inline void arch_write_lock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
while (1) {
|
while (1) {
|
||||||
if (likely(arch_write_trylock(rw) == 0))
|
if (likely(__arch_write_trylock(rw) == 0))
|
||||||
break;
|
break;
|
||||||
do {
|
do {
|
||||||
HMT_low();
|
HMT_low();
|
||||||
|
@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __raw_read_trylock(raw_rwlock_t *rw)
|
static inline int arch_read_trylock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
return arch_read_trylock(rw) > 0;
|
return __arch_read_trylock(rw) > 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int __raw_write_trylock(raw_rwlock_t *rw)
|
static inline int arch_write_trylock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
return arch_write_trylock(rw) == 0;
|
return __arch_write_trylock(rw) == 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_read_unlock(raw_rwlock_t *rw)
|
static inline void arch_read_unlock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
long tmp;
|
long tmp;
|
||||||
|
|
||||||
|
@ -280,19 +280,19 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
|
||||||
: "cr0", "xer", "memory");
|
: "cr0", "xer", "memory");
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void __raw_write_unlock(raw_rwlock_t *rw)
|
static inline void arch_write_unlock(arch_rwlock_t *rw)
|
||||||
{
|
{
|
||||||
__asm__ __volatile__("# write_unlock\n\t"
|
__asm__ __volatile__("# write_unlock\n\t"
|
||||||
LWSYNC_ON_SMP: : :"memory");
|
LWSYNC_ON_SMP: : :"memory");
|
||||||
rw->lock = 0;
|
rw->lock = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
|
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
|
||||||
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
|
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
|
||||||
|
|
||||||
#define _raw_spin_relax(lock) __spin_yield(lock)
|
#define arch_spin_relax(lock) __spin_yield(lock)
|
||||||
#define _raw_read_relax(lock) __rw_yield(lock)
|
#define arch_read_relax(lock) __rw_yield(lock)
|
||||||
#define _raw_write_relax(lock) __rw_yield(lock)
|
#define arch_write_relax(lock) __rw_yield(lock)
|
||||||
|
|
||||||
#endif /* __KERNEL__ */
|
#endif /* __KERNEL__ */
|
||||||
#endif /* __ASM_SPINLOCK_H */
|
#endif /* __ASM_SPINLOCK_H */
|
||||||
|
|
|
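After this rename the arch_spin_* functions are the architecture back ends that sit underneath the generic spinlock API: they operate on a bare arch_spinlock_t and do no lockdep, preemption, or interrupt bookkeeping of their own. A minimal usage sketch (not part of the patch) showing what a direct caller must therefore do around them, mirroring the lock_rtas() hunk further down:

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* the caller handles interrupts... */
	preempt_disable();		/* ...and preemption itself */
	arch_spin_lock(&demo_lock);
	/* critical section */
	arch_spin_unlock(&demo_lock);
	preempt_enable();
	local_irq_restore(flags);
}
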
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile signed int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }
 
 #endif
 
@@ -210,7 +210,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	action = desc->action;
 	if (!action || !action->handler)
@@ -237,7 +237,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	seq_putc(p, '\n');
 
 skip:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return 0;
 }
@@ -1112,7 +1112,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
 		if (!desc)
 			continue;
 
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 
 		if (desc->action && desc->action->handler) {
 			seq_printf(m, "%5d  ", i);
@@ -1131,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
 			seq_printf(m, "%s\n", p);
 		}
 
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 
 	return 0;
 
@@ -42,7 +42,7 @@
 #include <asm/mmu.h>
 
 struct rtas_t rtas = {
-	.lock = __RAW_SPIN_LOCK_UNLOCKED
+	.lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
 
 	local_irq_save(flags);
 	preempt_disable();
-	__raw_spin_lock_flags(&rtas.lock, flags);
+	arch_spin_lock_flags(&rtas.lock, flags);
 	return flags;
 }
 
 static void unlock_rtas(unsigned long flags)
 {
-	__raw_spin_unlock(&rtas.lock);
+	arch_spin_unlock(&rtas.lock);
 	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
 	return 1;
 }
 
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static u64 timebase = 0;
 
 void __cpuinit rtas_give_timebase(void)
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
 
 	local_irq_save(flags);
 	hard_irq_disable();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
 	timebase = get_tb();
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 
 	while (timebase)
 		barrier();
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
 {
 	while (!timebase)
 		barrier();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 }
 
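The two timebase hunks implement a small handshake in which the timebase variable doubles as mailbox and flag: the giver freezes the timebase, publishes its value, and spins until the taker writes zero back; the taker spins until a value appears, loads it into its own timebase registers, and zeroes it as the acknowledgement. A condensed sketch of that shape (illustrative; get_tb()/set_tb() stand in for the real PowerPC accessors and the RTAS freeze call is elided):

static arch_spinlock_t tb_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static volatile u64 tb_mailbox;		/* 0 means "empty" */

static void demo_give_timebase(void)
{
	arch_spin_lock(&tb_lock);
	tb_mailbox = get_tb();		/* publish the frozen value */
	arch_spin_unlock(&tb_lock);
	while (tb_mailbox)		/* wait for the taker's ack */
		barrier();
}

static void demo_take_timebase(void)
{
	while (!tb_mailbox)		/* wait for a published value */
		barrier();
	arch_spin_lock(&tb_lock);
	set_tb(tb_mailbox >> 32, tb_mailbox & 0xffffffff);
	tb_mailbox = 0;			/* acknowledge to the giver */
	arch_spin_unlock(&tb_lock);
}
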
@@ -25,7 +25,7 @@
 #include <asm/smp.h>
 #include <asm/firmware.h>
 
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 
@@ -55,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock)
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
 }
 #endif
 
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (lock->slock) {
 		HMT_low();
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 		HMT_medium();
 	}
 }
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);
 
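__spin_yield() works because, per the "0x800000yy" comment in the spinlock header above, a held PPC64 lock word encodes the holder's CPU number; the yield path extracts it and directs a hypervisor yield at that virtual CPU. An illustrative decode (the 0xffff mask matches how __spin_yield() reads the holder, but the helper name is made up):

static inline unsigned int demo_holder_cpu(unsigned int lock_value)
{
	/* 0 means unlocked; otherwise the low bits carry the CPU
	 * number that the hypervisor yield should target */
	return lock_value & 0xffff;
}
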
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
 	u32 status, enable;
 
 	/* Mask off the cascaded IRQ */
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->chip->mask(virq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	/* Ask the FPGA for IRQ status.  If 'val' is 0, then no irqs
 	 * are pending.  'ffs()' is 1 based */
@@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
 	}
 
 	/* Processing done; can reenable the cascade now */
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->chip->ack(virq);
 	if (!(desc->status & IRQ_DISABLED))
 		desc->chip->unmask(virq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 static int media5200_irq_map(struct irq_host *h, unsigned int virq,
 
@@ -237,7 +237,7 @@ extern int noirqdebug;
 
 static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
 {
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
 			goto out_eoi;
 
 		desc->status &= ~IRQ_PENDING;
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 		action_ret = handle_IRQ_event(irq, action);
 		if (!noirqdebug)
 			note_interrupt(irq, desc, action_ret);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
 
 	desc->status &= ~IRQ_INPROGRESS;
 out_eoi:
 	desc->chip->eoi(irq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 static int iic_host_map(struct irq_host *h, unsigned int virq,
 
@@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs()
 		struct irq_desc *desc = irq_to_desc(irq);
 
 		if (desc && desc->chip && desc->chip->startup) {
-			spin_lock_irqsave(&desc->lock, flags);
+			raw_spin_lock_irqsave(&desc->lock, flags);
 			desc->chip->startup(irq);
-			spin_unlock_irqrestore(&desc->lock, flags);
+			raw_spin_unlock_irqrestore(&desc->lock, flags);
 		}
 	}
 }
 
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
 }
 
 #ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
 static unsigned long timebase;
 
 static void __devinit pas_give_timebase(void)
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void)
 
 	local_irq_save(flags);
 	hard_irq_disable();
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	mtspr(SPRN_TBCTL, TBCTL_FREEZE);
 	isync();
 	timebase = get_tb();
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 
 	while (timebase)
 		barrier();
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void)
 	while (!timebase)
 		smp_rmb();
 
-	__raw_spin_lock(&timebase_lock);
+	arch_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	__raw_spin_unlock(&timebase_lock);
+	arch_spin_unlock(&timebase_lock);
 }
 
 struct smp_ops_t pas_smp_ops = {
 
@@ -906,7 +906,7 @@ void xics_migrate_irqs_away(void)
 		    || desc->chip->set_affinity == NULL)
 			continue;
 
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 
 		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
 		if (status) {
@@ -930,7 +930,7 @@ void xics_migrate_irqs_away(void)
 		cpumask_setall(irq_to_desc(virq)->affinity);
 		desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
 #endif
 
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	u32 intr_index;
 	u32 have_shift = 0;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
 		if (desc->chip->mask_ack)
 			desc->chip->mask_ack(irq);
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		break;
 	}
 unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 static int __devinit fsl_of_msi_probe(struct of_device *dev,
 
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
 	int src;
 	int subvirq;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	if (desc->status & IRQ_LEVEL)
 		desc->chip->mask(virq);
 	else
 		desc->chip->mask_ack(virq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	msr = mfdcr(uic->dcrbase + UIC_MSR);
 	if (!msr) /* spurious interrupt */
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
 		generic_handle_irq(subvirq);
 
 uic_irq_ret:
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	if (desc->status & IRQ_LEVEL)
 		desc->chip->ack(virq);
 	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
 		desc->chip->unmask(virq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 static struct uic * __init uic_init_one(struct device_node *node)
 
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
 * (the type definitions are in asm/spinlock_types.h)
 */
 
-#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) \
-		 _raw_spin_relax(lock); } while (0)
+#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) \
+		 arch_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void arch_spin_lock_wait(arch_spinlock_t *);
+extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int arch_spin_trylock_retry(arch_spinlock_t *);
+extern void arch_spin_relax(arch_spinlock_t *lock);
 
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return;
-	_raw_spin_lock_wait(lp);
+	arch_spin_lock_wait(lp);
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 					unsigned long flags)
 {
 	int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return;
-	_raw_spin_lock_wait_flags(lp, flags);
+	arch_spin_lock_wait_flags(lp, flags);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return 1;
-	return _raw_spin_trylock_retry(lp);
+	return arch_spin_trylock_retry(lp);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
@@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
 
 /**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
 
-extern void _raw_read_lock_wait(raw_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		_raw_read_lock_wait(rw);
 }
 
-static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int old, cmp;
 
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	} while (cmp != old);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
 		_raw_write_lock_wait(rw);
 }
 
-static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	old = rw->lock & 0x7fffffffU;
@@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	return _raw_read_trylock_retry(rw);
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
 		return 1;
 	return _raw_write_trylock_retry(rw);
 }
 
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
 
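The s390 convention used throughout this header: the lock word holds 0 when free and otherwise the bitwise complement of the owner's CPU id (~cpu, so that CPU 0 still produces a non-zero word), and acquisition is a single compare-and-swap from 0. A portable sketch of that convention (illustrative; the GCC builtin stands in for _raw_compare_and_swap()):

static inline int demo_s390_trylock(volatile unsigned int *owner_cpu,
				    unsigned int cpu_id)
{
	/* succeeds iff the word was 0 (unlocked); stores ~cpu_id so
	 * the owner can later be recovered for directed yields */
	return __sync_val_compare_and_swap(owner_cpu, 0, ~cpu_id) == 0;
}
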
@@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED	{ 0 }
 
 #endif
 
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
 			_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
 			_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
 		local_irq_restore(flags);
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
 
 	for (count = spin_retry; count > 0; count--) {
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return 1;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void _raw_spin_relax(raw_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
 	if (cpu != 0)
 		_raw_yield_cpu(~cpu);
 }
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
 
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait);
 
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	unsigned int old;
 	int count = spin_retry;
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		local_irq_disable();
@@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_read_lock_wait_flags);
 
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
 {
 	unsigned int old;
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!__raw_read_can_lock(rw))
+		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
 	int count = spin_retry;
 
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
 			return;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
 	int count = spin_retry;
 
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 			_raw_yield();
 			count = spin_retry;
 		}
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait_flags);
 
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!__raw_write_can_lock(rw))
+		if (!arch_write_can_lock(rw))
 			continue;
 		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
 			return 1;
 
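Every wait loop in this file follows the same bounded-spin pattern: retry the compare-and-swap up to spin_retry times, and when the budget is exhausted yield to the hypervisor so the lock holder can actually run. Its shape, reduced to a sketch (illustrative; the yield hook and the CAS builtin are stand-ins for the s390 primitives):

static void demo_lock_wait(volatile unsigned int *word, unsigned int me,
			   int spin_retry, void (*yield)(void))
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			yield();	/* cede the CPU to the holder */
			count = spin_retry;
		}
		if (*word != 0)		/* still held: keep polling */
			continue;
		if (__sync_val_compare_and_swap(word, 0, me) == 0)
			return;		/* acquired */
	}
}
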
@@ -23,10 +23,10 @@
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
 
-#define __raw_spin_is_locked(x)	((x)->lock <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
-	do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x)	((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
 *
 * We make no fairness assumptions.  They have a cost.
 */
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 	unsigned long oldval;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%2, %0 ! __raw_spin_lock \n\t"
+		"movli.l @%2, %0 ! arch_spin_lock \n\t"
 		"mov %0, %1 \n\t"
 		"mov #0, %0 \n\t"
 		"movco.l %0, @%2 \n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
-		"mov #1, %0 ! __raw_spin_unlock \n\t"
+		"mov #1, %0 ! arch_spin_unlock \n\t"
 		"mov.l %0, @%1 \n\t"
 		: "=&z" (tmp)
 		: "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	);
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%2, %0 ! __raw_spin_trylock \n\t"
+		"movli.l @%2, %0 ! arch_spin_trylock \n\t"
 		"mov %0, %1 \n\t"
 		"mov #0, %0 \n\t"
 		"movco.l %0, @%2 \n\t"
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
-#define __raw_read_can_lock(x)	((x)->lock > 0)
+#define arch_read_can_lock(x)	((x)->lock > 0)
 
 /**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
-#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! __raw_read_lock \n\t"
+		"movli.l @%1, %0 ! arch_read_lock \n\t"
 		"cmp/pl %0 \n\t"
 		"bf 1b \n\t"
 		"add #-1, %0 \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! __raw_read_unlock \n\t"
+		"movli.l @%1, %0 ! arch_read_unlock \n\t"
 		"add #1, %0 \n\t"
 		"movco.l %0, @%1 \n\t"
 		"bf 1b \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%1, %0 ! __raw_write_lock \n\t"
+		"movli.l @%1, %0 ! arch_write_lock \n\t"
 		"cmp/hs %2, %0 \n\t"
 		"bf 1b \n\t"
 		"sub %2, %0 \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	);
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__ (
-		"mov.l %1, @%0 ! __raw_write_unlock \n\t"
+		"mov.l %1, @%0 ! arch_write_unlock \n\t"
 		:
 		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
 		: "t", "memory"
 	);
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%2, %0 ! __raw_read_trylock \n\t"
+		"movli.l @%2, %0 ! arch_read_trylock \n\t"
 		"mov %0, %1 \n\t"
 		"cmp/pl %0 \n\t"
 		"bf 2f \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	return (oldval > 0);
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1: \n\t"
-		"movli.l @%2, %0 ! __raw_write_trylock \n\t"
+		"movli.l @%2, %0 ! arch_write_trylock \n\t"
 		"mov %0, %1 \n\t"
 		"cmp/hs %3, %0 \n\t"
 		"bf 2f \n\t"
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SH_SPINLOCK_H */
 
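Note the SH encoding, visible in the asm above and the types below: the lock word is 1 when free and 0 when held (arch_spin_is_locked() tests <= 0), so unlock is a plain store of 1 and the movli.l/movco.l pair is an LL/SC swap-in of 0. A portable rendering of the trylock (illustrative; a GCC atomic builtin replaces the LL/SC sequence):

static inline int demo_sh_trylock(volatile unsigned int *slock)
{
	/* atomically write 0 (= held); an old value of 1 means the
	 * lock was free and is now ours */
	return __sync_lock_test_and_set(slock, 0) == 1;
}
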
@@ -7,15 +7,15 @@
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 1 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
 #define RW_LOCK_BIAS			0x01000000
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED	{ RW_LOCK_BIAS }
 
 #endif
 
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (!desc)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	for_each_online_cpu(j)
 		any_count |= kstat_irqs_cpu(i, j);
 	action = desc->action;
@@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	seq_putc(p, '\n');
 out:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 #endif
 
@@ -10,12 +10,12 @@
 
 #include <asm/psr.h>
 
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 	"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "g2", "memory", "cc");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int result;
 	__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
-*	| 24-bit counter           | wlock |  raw_rwlock_t
+*	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7      0
 *
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_lock(lock); \
+	__arch_read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
do {	unsigned long flags; \
 	local_irq_save(flags); \
-	arch_read_unlock(lock); \
+	__arch_read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	lp = rw;
 	__asm__ __volatile__(
 	"mov	%%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val;
 
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (val == 0);
 }
 
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
 {
-	register raw_rwlock_t *lp asm("g1");
+	register arch_rwlock_t *lp asm("g1");
 	register int res asm("o0");
 	lp = rw;
 	__asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
 	return res;
 }
 
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = arch_read_trylock(lock); \
+	res = __arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
})
 
-#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
 
 #endif /* !(__ASSEMBLY__) */
 
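The sparc32 rwlock packs a 24-bit reader count above an 8-bit write-lock byte, as the layout comment above shows; ldstub can only operate on a byte, which is why the write lock lives in the low 8 bits and why the comment notes the scheme "limits us to ~16,000,000 cpus". Illustrative field helpers (the 0xff mask mirrors arch_read_can_lock(); the names are made up):

#define DEMO_RW_READERS(v)	(((v) >> 8) & 0x00ffffffU)	/* bits 31..8 */
#define DEMO_RW_WLOCK(v)	((v) & 0xffU)			/* bits  7..0 */

static inline int demo_read_can_lock(unsigned int lockval)
{
	return DEMO_RW_WLOCK(lockval) == 0;	/* no writer holds it */
}
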
@ -21,13 +21,13 @@
  * the spinner sections must be pre-V9 branches.
  */
 
-#define __raw_spin_is_locked(lp)	((lp)->lock != 0)
+#define arch_spin_is_locked(lp)	((lp)->lock != 0)
 
-#define __raw_spin_unlock_wait(lp)	\
+#define arch_spin_unlock_wait(lp)	\
 	do {	rmb();			\
 	} while((lp)->lock)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long result;
 
@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0UL);
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stb	%%g0, [%0]"
@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 	: "memory");
 }
 
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long tmp1, tmp2;
 
@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
 {
 	int tmp1, tmp2;
 
@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
 	return tmp1;
 }
 
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stw	%%g0, [%0]"
@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
 	return result;
 }
 
-#define __raw_read_lock(p)	arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p)	arch_read_trylock(p)
-#define __raw_read_unlock(p)	arch_read_unlock(p)
-#define __raw_write_lock(p)	arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p)	arch_write_unlock(p)
-#define __raw_write_trylock(p)	arch_write_trylock(p)
+#define arch_read_lock(p)	arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p)	arch_read_trylock(p)
+#define arch_read_unlock(p)	arch_read_unlock(p)
+#define arch_write_lock(p)	arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p)	arch_write_unlock(p)
+#define arch_write_trylock(p)	arch_write_trylock(p)
 
-#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw)	(!(rw)->lock)
+#define arch_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw)		(!(rw)->lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* !(__ASSEMBLY__) */
@ -7,14 +7,14 @@
 
 typedef struct {
 	volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ 0 }
+#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
 
 #endif
@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
@ -785,14 +785,14 @@ void fixup_irqs(void)
 	for (irq = 0; irq < NR_IRQS; irq++) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&irq_desc[irq].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
 		if (irq_desc[irq].action &&
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
 					irq_desc[irq].affinity);
 		}
-		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
 
 	tick_ops->disable_irq();
@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS)
 		seq_putc(p, '\n');
@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
@ -318,14 +318,14 @@ struct pv_mmu_ops {
 			   phys_addr_t phys, pgprot_t flags);
 };
 
-struct raw_spinlock;
+struct arch_spinlock;
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct raw_spinlock *lock);
-	int (*spin_is_contended)(struct raw_spinlock *lock);
-	void (*spin_lock)(struct raw_spinlock *lock);
-	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
-	void (*spin_unlock)(struct raw_spinlock *lock);
+	int (*spin_is_locked)(struct arch_spinlock *lock);
+	int (*spin_is_contended)(struct arch_spinlock *lock);
+	void (*spin_lock)(struct arch_spinlock *lock);
+	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+	int (*spin_trylock)(struct arch_spinlock *lock);
+	void (*spin_unlock)(struct arch_spinlock *lock);
 };
 
 /* This contains all the paravirt structures: we get a convenient
@ -58,7 +58,7 @@
 #if (NR_CPUS < 256)
 #define TICKET_SHIFT 8
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp, new;
 
@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 #else
 #define TICKET_SHIFT 16
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 		     : "memory", "cc");
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
 	return (int)(lock)->lock > 0;
 }
@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
 	return (lock)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
 		     "jns 1f\n"
@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw) : "memory");
 }
 
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
 		     "jz 1f\n"
@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
 }
 
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
 
@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 	return 0;
 }
 
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	asm volatile(LOCK_PREFIX "addl %1, %0"
 		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }
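
The ticket-lock code renamed above packs two counters into one lock word: the low TICKET_SHIFT bits hold the ticket currently being served and the next bits hold the next ticket to hand out, so __ticket_spin_is_locked() reduces to "owner != next". A minimal userspace illustration of the same idea with C11 atomics (a sketch of the concept only, not kernel code; the kernel keeps both counters in a single word so lock and unlock stay single instructions):

    #include <stdatomic.h>

    /* Toy ticket lock: two counters instead of one packed word. */
    struct toy_ticket_lock {
    	_Atomic unsigned next;	/* next ticket to hand out (high half) */
    	_Atomic unsigned owner;	/* ticket being served (low half) */
    };

    static void toy_lock(struct toy_ticket_lock *l)
    {
    	unsigned me = atomic_fetch_add(&l->next, 1);	/* take a ticket */
    	while (atomic_load(&l->owner) != me)		/* wait for our turn */
    		;					/* cpu_relax() in the kernel */
    }

    static void toy_unlock(struct toy_ticket_lock *l)
    {
    	atomic_fetch_add(&l->owner, 1);	/* serve next ticket, like "incb %0" */
    }

    static int toy_is_locked(struct toy_ticket_lock *l)
    {
    	/* mirrors ((tmp >> TICKET_SHIFT) ^ tmp) & mask: locked iff owner != next */
    	return atomic_load(&l->owner) != atomic_load(&l->next);
    }
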
@ -5,16 +5,16 @@
 # error "please don't include this file directly"
 #endif
 
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
 	unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
 
 typedef struct {
 	unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
 
 #endif /* _ASM_X86_SPINLOCK_TYPES_H */
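
The spinlock_types.h hunk above is the bottom of the naming scheme this branch introduces: the arch-provided type becomes arch_spinlock_t and the generic code wraps it. As a simplified sketch of the resulting nesting (debug members elided; the real definitions live in include/linux/spinlock_types.h after the series is applied):

    /* arch level: just the hardware lock word (this file, after the rename) */
    typedef struct arch_spinlock {
    	unsigned int slock;
    } arch_spinlock_t;

    /* generic level: always a true spinning lock, carries debug/lockdep state */
    typedef struct raw_spinlock {
    	arch_spinlock_t raw_lock;
    	/* break_lock / magic / dep_map fields elided */
    } raw_spinlock_t;

    /* what most kernel code uses; this indirection is what later lets
     * preempt-rt substitute a sleeping lock here without touching callers */
    typedef struct spinlock {
    	struct raw_spinlock rlock;
    } spinlock_t;
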
@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			continue;
 
 		cfg = irq_cfg(irq);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		}
 		__get_cpu_var(vector_irq)[vector] = -1;
 unlock:
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 	}
 
 	irq_exit();
@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void)
 	/* racy, but better than risking deadlock. */
 	raw_local_irq_save(flags);
 	cpu = smp_processor_id();
-	if (!__raw_spin_trylock(&die_lock)) {
+	if (!arch_spin_trylock(&die_lock)) {
 		if (cpu == die_owner)
 			/* nested oops. should stop eventually */;
 		else
-			__raw_spin_lock(&die_lock);
+			arch_spin_lock(&die_lock);
 	}
 	die_nest_count++;
 	die_owner = cpu;
@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 	die_nest_count--;
 	if (!die_nest_count)
 		/* Nest count reaches zero, release the lock. */
-		__raw_spin_unlock(&die_lock);
+		arch_spin_unlock(&die_lock);
 	raw_local_irq_restore(flags);
 	oops_exit();
|
@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||||
if (!desc)
|
if (!desc)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
spin_lock_irqsave(&desc->lock, flags);
|
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||||
for_each_online_cpu(j)
|
for_each_online_cpu(j)
|
||||||
any_count |= kstat_irqs_cpu(i, j);
|
any_count |= kstat_irqs_cpu(i, j);
|
||||||
action = desc->action;
|
action = desc->action;
|
||||||
|
@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||||
|
|
||||||
seq_putc(p, '\n');
|
seq_putc(p, '\n');
|
||||||
out:
|
out:
|
||||||
spin_unlock_irqrestore(&desc->lock, flags);
|
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -294,12 +294,12 @@ void fixup_irqs(void)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* interrupt's are disabled at this point */
|
/* interrupt's are disabled at this point */
|
||||||
spin_lock(&desc->lock);
|
raw_spin_lock(&desc->lock);
|
||||||
|
|
||||||
affinity = desc->affinity;
|
affinity = desc->affinity;
|
||||||
if (!irq_has_action(irq) ||
|
if (!irq_has_action(irq) ||
|
||||||
cpumask_equal(affinity, cpu_online_mask)) {
|
cpumask_equal(affinity, cpu_online_mask)) {
|
||||||
spin_unlock(&desc->lock);
|
raw_spin_unlock(&desc->lock);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -326,7 +326,7 @@ void fixup_irqs(void)
|
||||||
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
|
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
|
||||||
desc->chip->unmask(irq);
|
desc->chip->unmask(irq);
|
||||||
|
|
||||||
spin_unlock(&desc->lock);
|
raw_spin_unlock(&desc->lock);
|
||||||
|
|
||||||
if (break_affinity && set_affinity)
|
if (break_affinity && set_affinity)
|
||||||
printk("Broke affinity for irq %i\n", irq);
|
printk("Broke affinity for irq %i\n", irq);
|
||||||
|
@ -356,10 +356,10 @@ void fixup_irqs(void)
|
||||||
irq = __get_cpu_var(vector_irq)[vector];
|
irq = __get_cpu_var(vector_irq)[vector];
|
||||||
|
|
||||||
desc = irq_to_desc(irq);
|
desc = irq_to_desc(irq);
|
||||||
spin_lock(&desc->lock);
|
raw_spin_lock(&desc->lock);
|
||||||
if (desc->chip->retrigger)
|
if (desc->chip->retrigger)
|
||||||
desc->chip->retrigger(irq);
|
desc->chip->retrigger(irq);
|
||||||
spin_unlock(&desc->lock);
|
raw_spin_unlock(&desc->lock);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,9 +8,9 @@
 #include <asm/paravirt.h>
 
 static inline void
-default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 struct pv_lock_ops pv_lock_ops = {
@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void)
 		 * previous TSC that was measured (possibly on
 		 * another CPU) and update the previous TSC timestamp.
 		 */
-		__raw_spin_lock(&sync_lock);
+		arch_spin_lock(&sync_lock);
 		prev = last_tsc;
 		rdtsc_barrier();
 		now = get_cycles();
 		rdtsc_barrier();
 		last_tsc = now;
-		__raw_spin_unlock(&sync_lock);
+		arch_spin_unlock(&sync_lock);
 
 		/*
 		 * Be nice every now and then (and also check whether
@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void)
 		 * we saw a time-warp of the TSC going backwards:
 		 */
 		if (unlikely(prev > now)) {
-			__raw_spin_lock(&sync_lock);
+			arch_spin_lock(&sync_lock);
 			max_warp = max(max_warp, prev - now);
 			nr_warps++;
-			__raw_spin_unlock(&sync_lock);
+			arch_spin_unlock(&sync_lock);
 		}
 	}
 	WARN(!(now-start),
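
As the comment in the hunk above says, check_tsc_warp() deliberately takes the lowest-level lock op so that no lockdep or debug code runs inside the timed critical section. The algorithm itself is simple: under the lock, read the last timestamp any CPU published, take a new one, publish it, and record a warp if time appears to have moved backwards. A userspace sketch of the same pattern (an assumption-laden illustration: CLOCK_MONOTONIC_RAW stands in for the TSC, an atomic_flag for the arch spinlock):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <time.h>

    static atomic_flag sync_lock = ATOMIC_FLAG_INIT;  /* arch_spin_lock stand-in */
    static uint64_t last_ts, max_warp;

    static uint64_t now_ns(void)
    {
    	struct timespec ts;
    	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
    	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static void warp_check_once(void)  /* run concurrently from several threads */
    {
    	while (atomic_flag_test_and_set(&sync_lock))
    		;				/* spin: keep the section tiny */
    	uint64_t prev = last_ts;
    	uint64_t now = now_ns();
    	last_ts = now;
    	atomic_flag_clear(&sync_lock);

    	if (prev > now) {			/* clock warped backwards */
    		while (atomic_flag_test_and_set(&sync_lock))
    			;
    		if (prev - now > max_warp)
    			max_warp = prev - now;
    		atomic_flag_clear(&sync_lock);
    	}
    }
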
@ -120,14 +120,14 @@ struct xen_spinlock {
 	unsigned short spinners;	/* count of waiting cpus */
 };
 
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
 	return xl->lock != 0;
 }
 
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
 	return xl->spinners != 0;
 }
 
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	u8 old = 1;
@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
 	__get_cpu_var(lock_spinners) = prev;
 }
 
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
@ -254,7 +254,7 @@ out:
 	return ret;
 }
 
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	unsigned timeout;
@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
 	spin_time_accum_total(start_spin);
 }
 
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
 {
 	__xen_spin_lock(lock, false);
 }
 
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
 {
 	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
 }
@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
 	}
 }
 
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
@ -15,19 +15,19 @@
 # define ATOMIC_HASH_SIZE 4
 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
 
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
 /* Can't use raw_spin_lock_irq because of #include problems, so
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	arch_spinlock_t *s = ATOMIC_HASH(l);	\
 	local_irq_save(f);			\
-	__raw_spin_lock(s);			\
+	arch_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
-	raw_spinlock_t *s = ATOMIC_HASH(l);		\
-	__raw_spin_unlock(s);				\
+	arch_spinlock_t *s = ATOMIC_HASH(l);		\
+	arch_spin_unlock(s);				\
 	local_irq_restore(f);				\
 } while(0)
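
Background on the hunk above: this architecture cannot do arbitrary atomic read-modify-write in hardware, so atomic_t is emulated by taking a spinlock chosen from a small hash of the variable's address; ATOMIC_HASH() divides the address by the cache-line size and masks it into one of four buckets. A userspace sketch of the same trick (pthread mutexes standing in for the arch spinlocks; names are illustrative, not from the source):

    #include <pthread.h>
    #include <stdint.h>

    #define NBUCKETS	4	/* like ATOMIC_HASH_SIZE */
    #define LINE_SIZE	64	/* like L1_CACHE_BYTES */

    static pthread_mutex_t bucket_lock[NBUCKETS] = {
    	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* same shape as ATOMIC_HASH(): address / line size, masked into a bucket */
    static pthread_mutex_t *bucket(const void *addr)
    {
    	return &bucket_lock[((uintptr_t)addr / LINE_SIZE) & (NBUCKETS - 1)];
    }

    static int emulated_atomic_add_return(int i, int *v)
    {
    	pthread_mutex_t *m = bucket(v);
    	int ret;

    	pthread_mutex_lock(m);		/* _atomic_spin_lock_irqsave() role */
    	ret = (*v += i);
    	pthread_mutex_unlock(m);
    	return ret;
    }
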
@ -169,7 +169,7 @@ struct hrtimer_clock_base {
  * @max_hang_time:	Maximum time spent in hrtimer_interrupt
  */
 struct hrtimer_cpu_base {
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t				expires_next;
@ -170,7 +170,7 @@ extern struct cred init_cred;
 	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),		\
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
 	.fs_excl	= ATOMIC_INIT(0),				\
-	.pi_lock	= __SPIN_LOCK_UNLOCKED(tsk.pi_lock),		\
+	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
 	.timer_slack_ns = 50000, /* 50 usec default slack */		\
 	.pids = {							\
 		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),		\
@ -192,7 +192,7 @@ struct irq_desc {
 	unsigned int		irq_count;	/* For detecting broken IRQs */
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 #ifdef CONFIG_SMP
 	cpumask_var_t		affinity;
 	unsigned int		node;
@ -681,7 +681,7 @@ struct perf_event_context {
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	/*
 	 * Protect the list of events. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
@ -81,7 +81,8 @@ struct plist_head {
 	struct list_head prio_list;
 	struct list_head node_list;
 #ifdef CONFIG_DEBUG_PI_LIST
-	spinlock_t *lock;
+	raw_spinlock_t *rawlock;
+	spinlock_t *spinlock;
 #endif
 };
 
@ -91,9 +92,11 @@ struct plist_node {
 };
 
 #ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock)		.lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock)		.spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)	.rawlock = _lock
 #else
 # define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
 #endif
 
 #define _PLIST_HEAD_INIT(head)				\
@ -111,6 +114,17 @@ struct plist_node {
 	PLIST_HEAD_LOCK_INIT(&(_lock))			\
 }
 
+/**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head:	struct plist_head variable name
+ * @_lock:	lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock)		\
+{							\
+	_PLIST_HEAD_INIT(head),				\
+	PLIST_HEAD_LOCK_INIT_RAW(&(_lock))		\
+}
+
 /**
  * PLIST_NODE_INIT - static struct plist_node initializer
  * @node:	struct plist_node variable name
@ -125,7 +139,7 @@ struct plist_node {
 /**
  * plist_head_init - dynamic struct plist_head initializer
  * @head:	&struct plist_head pointer
- * @lock:	list spinlock, remembered for debugging
+ * @lock:	spinlock protecting the list (debugging)
 */
 static inline void
 plist_head_init(struct plist_head *head, spinlock_t *lock)
@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
 	INIT_LIST_HEAD(&head->prio_list);
 	INIT_LIST_HEAD(&head->node_list);
 #ifdef CONFIG_DEBUG_PI_LIST
-	head->lock = lock;
+	head->spinlock = lock;
+	head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head:	&struct plist_head pointer
+ * @lock:	raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+	INIT_LIST_HEAD(&head->prio_list);
+	INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+	head->rawlock = lock;
+	head->spinlock = NULL;
 #endif
 }
 
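
The effect of the plist changes above: a debug-enabled priority list can now remember either lock flavor, and callers state which one protects the list. A hedged usage sketch (assuming the DEFINE_SPINLOCK/DEFINE_RAW_SPINLOCK helpers from the same series; the rtmutex hunk that follows is the real raw user):

    /* static initialization, one variant per lock flavor */
    static DEFINE_SPINLOCK(normal_lock);
    static struct plist_head normal_list =
    	PLIST_HEAD_INIT(normal_list, normal_lock);

    static DEFINE_RAW_SPINLOCK(rt_lock);
    static struct plist_head rt_list =
    	PLIST_HEAD_INIT_RAW(rt_list, rt_lock);

    /* dynamic initialization follows the same split */
    static void setup_rt_list(struct plist_head *head, raw_spinlock_t *lock)
    {
    	/* records lock in head->rawlock when CONFIG_DEBUG_PI_LIST is set */
    	plist_head_init_raw(head, lock);
    }
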
@ -24,7 +24,7 @@
  * @owner:	the mutex owner
  */
 struct rt_mutex {
-	spinlock_t		wait_lock;
+	raw_spinlock_t		wait_lock;
 	struct plist_head	wait_list;
 	struct task_struct	*owner;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
@ -63,8 +63,8 @@ struct hrtimer_sleeper;
 #endif
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
-	{ .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+	, .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
 	, .owner = NULL \
 	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
include/linux/rwlock.h (new file, 125 lines)
@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+			   struct lock_class_key *key);
+# define rwlock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__rwlock_init((lock), #lock, &__key);			\
+} while (0)
+#else
+# define rwlock_init(lock)					\
+	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
+#else
+# define do_raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+		arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+		arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)		arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock)	__cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock)	__cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock)	_raw_write_lock(lock)
+#define read_lock(lock)		_raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _raw_read_lock_irqsave(lock);	\
+	} while (0)
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = _raw_write_lock_irqsave(lock);	\
+	} while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_read_lock_irqsave(lock, flags);	\
+	} while (0)
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_write_lock_irqsave(lock, flags);	\
+	} while (0)
+
+#endif
+
+#define read_lock_irq(lock)	_raw_read_lock_irq(lock)
+#define read_lock_bh(lock)	_raw_read_lock_bh(lock)
+#define write_lock_irq(lock)	_raw_write_lock_irq(lock)
+#define write_lock_bh(lock)	_raw_write_lock_bh(lock)
+#define read_unlock(lock)	_raw_read_unlock(lock)
+#define write_unlock(lock)	_raw_write_unlock(lock)
+#define read_unlock_irq(lock)	_raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock)	_raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_read_unlock_irqrestore(lock, flags); \
+	} while (0)
+#define read_unlock_bh(lock)	_raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		_raw_write_unlock_irqrestore(lock, flags); \
+	} while (0)
+#define write_unlock_bh(lock)	_raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+	local_irq_save(flags); \
+	write_trylock(lock) ? \
+	1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
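
The new rwlock.h above only splits the reader/writer lock API out of spinlock.h; the calling convention for users is unchanged. For reference, a typical use of the macros it declares (an illustrative sketch, not code from this commit):

    static DEFINE_RWLOCK(table_lock);
    static int table[16];

    int table_read(int idx)
    {
    	unsigned long flags;
    	int val;

    	read_lock_irqsave(&table_lock, flags);	/* many readers may hold this */
    	val = table[idx];
    	read_unlock_irqrestore(&table_lock, flags);
    	return val;
    }

    void table_update(int idx, int val)
    {
    	write_lock_irq(&table_lock);		/* writers are exclusive */
    	table[idx] = val;
    	write_unlock_irq(&table_lock);
    }
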
282
include/linux/rwlock_api_smp.h
Normal file
282
include/linux/rwlock_api_smp.h
Normal file
|
@ -0,0 +1,282 @@
|
||||||
|
#ifndef __LINUX_RWLOCK_API_SMP_H
|
||||||
|
#define __LINUX_RWLOCK_API_SMP_H
|
||||||
|
|
||||||
|
#ifndef __LINUX_SPINLOCK_API_SMP_H
|
||||||
|
# error "please don't include this file directly"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* include/linux/rwlock_api_smp.h
|
||||||
|
*
|
||||||
|
* spinlock API declarations on SMP (and debug)
|
||||||
|
* (implemented in kernel/spinlock.c)
|
||||||
|
*
|
||||||
|
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
|
||||||
|
* Released under the General Public License (GPL).
|
||||||
|
*/
|
||||||
|
|
||||||
|
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
|
||||||
|
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
|
||||||
|
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
|
||||||
|
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
|
||||||
|
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
|
||||||
|
void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
|
||||||
|
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
|
||||||
|
__acquires(lock);
|
||||||
|
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
|
||||||
|
__acquires(lock);
|
||||||
|
int __lockfunc _raw_read_trylock(rwlock_t *lock);
|
||||||
|
int __lockfunc _raw_write_trylock(rwlock_t *lock);
|
||||||
|
void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
|
||||||
|
void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
|
||||||
|
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
|
||||||
|
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
|
||||||
|
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
|
||||||
|
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
|
||||||
|
void __lockfunc
|
||||||
|
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
|
||||||
|
__releases(lock);
|
||||||
|
void __lockfunc
|
||||||
|
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
|
||||||
|
__releases(lock);
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_LOCK
|
||||||
|
#define _raw_read_lock(lock) __raw_read_lock(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_LOCK
|
||||||
|
#define _raw_write_lock(lock) __raw_write_lock(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_LOCK_BH
|
||||||
|
#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_LOCK_BH
|
||||||
|
#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_LOCK_IRQ
|
||||||
|
#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
|
||||||
|
#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
|
||||||
|
#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
|
||||||
|
#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_TRYLOCK
|
||||||
|
#define _raw_read_trylock(lock) __raw_read_trylock(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_TRYLOCK
|
||||||
|
#define _raw_write_trylock(lock) __raw_write_trylock(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_UNLOCK
|
||||||
|
#define _raw_read_unlock(lock) __raw_read_unlock(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_UNLOCK
|
||||||
|
#define _raw_write_unlock(lock) __raw_write_unlock(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_UNLOCK_BH
|
||||||
|
#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
|
||||||
|
#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
|
||||||
|
#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
|
||||||
|
#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
|
||||||
|
#define _raw_read_unlock_irqrestore(lock, flags) \
|
||||||
|
__raw_read_unlock_irqrestore(lock, flags)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
|
||||||
|
#define _raw_write_unlock_irqrestore(lock, flags) \
|
||||||
|
__raw_write_unlock_irqrestore(lock, flags)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static inline int __raw_read_trylock(rwlock_t *lock)
|
||||||
|
{
|
||||||
|
preempt_disable();
|
||||||
|
if (do_raw_read_trylock(lock)) {
|
||||||
|
rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
preempt_enable();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int __raw_write_trylock(rwlock_t *lock)
|
||||||
|
{
|
||||||
|
preempt_disable();
|
||||||
|
if (do_raw_write_trylock(lock)) {
|
||||||
|
rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
preempt_enable();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline void __raw_read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}

static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
			     do_raw_read_lock_flags, &flags);
	return flags;
}

static inline void __raw_read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}

static inline void __raw_read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
}

static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
			     do_raw_write_lock_flags, &flags);
	return flags;
}

static inline void __raw_write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}

static inline void __raw_write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}

static inline void __raw_write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}

#endif /* CONFIG_PREEMPT */

static inline void __raw_write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_write_unlock(lock);
	preempt_enable();
}

static inline void __raw_read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_read_unlock(lock);
	preempt_enable();
}

static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __raw_read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
						 unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __raw_write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

#endif /* __LINUX_RWLOCK_API_SMP_H */
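A minimal caller-side sketch (not part of this commit; the lock, counter, and function names are hypothetical) of the rwlock API these helpers back:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);	/* hypothetical lock */
static unsigned long hits;		/* hypothetical shared counter */

/* Readers may run concurrently; the read-side acquire above only
 * disables preemption and records the acquisition for lockdep. */
static unsigned long stats_read(void)
{
	unsigned long v;

	read_lock(&stats_lock);
	v = hits;
	read_unlock(&stats_lock);
	return v;
}

/* A writer excludes readers and other writers. */
static void stats_bump(void)
{
	write_lock(&stats_lock);
	hits++;
	write_unlock(&stats_lock);
}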
56	include/linux/rwlock_types.h	Normal file
@@ -0,0 +1,56 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H

/*
 * include/linux/rwlock_types.h - generic rwlock type definitions
 * and initializers
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */
typedef struct {
	arch_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;
	void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} rwlock_t;

#define RWLOCK_MAGIC		0xdeaf1eed

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname)				\
	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED,	\
				.magic = RWLOCK_MAGIC,			\
				.owner = SPINLOCK_OWNER_INIT,		\
				.owner_cpu = -1,			\
				RW_DEP_MAP_INIT(lockname) }
#else
#define __RW_LOCK_UNLOCKED(lockname)				\
	(rwlock_t)	{	.raw_lock = __ARCH_RW_LOCK_UNLOCKED,	\
				RW_DEP_MAP_INIT(lockname) }
#endif

/*
 * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
 * deprecated.
 *
 * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
 */
#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)

#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)

#endif /* __LINUX_RWLOCK_TYPES_H */
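For illustration, the two initialization styles this header supports (a sketch, not from the commit; the "table" names are hypothetical):

#include <linux/spinlock.h>

/* Static definition: the lock name feeds lockdep via RW_DEP_MAP_INIT(). */
static DEFINE_RWLOCK(table_lock);

/* Lock embedded in a structure, initialized at runtime. */
struct table {
	rwlock_t lock;
};

static void table_init(struct table *t)
{
	rwlock_init(&t->lock);
}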
@@ -1409,7 +1409,7 @@ struct task_struct {
 #endif

 	/* Protection of the PI data structures: */
-	spinlock_t pi_lock;
+	raw_spinlock_t pi_lock;

 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task */
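Callers that previously took pi_lock with spin_lock_irqsave() now use the raw variants; a sketch of the pattern (the helper function is hypothetical, not from the commit):

#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical helper: examine PI state with pi_lock held.
 * The raw_ variant keeps this a true busy-wait lock even where
 * spinlock_t may later become preemptible (PREEMPT_RT). */
static void inspect_pi_state(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/* ... walk the PI waiter data structures ... */
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}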
@@ -8,13 +8,13 @@
  *
  * on SMP builds:
  *
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
  *                        initializers
  *
  * linux/spinlock_types.h:
  *                        defines the generic type and initializers
  *
- * asm/spinlock.h:        contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h:        contains the arch_spin_*()/etc. lowlevel
  *                        implementations, mostly inline assembly code
  *
  * (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
  *                        defines the generic type and initializers
  *
  * linux/spinlock_up.h:
- *                        contains the __raw_spin_*()/etc. version of UP
+ *                        contains the arch_spin_*()/etc. version of UP
  *                        builds. (which are NOPs on non-debug, non-preempt
  *                        builds)
  *
@@ -75,12 +75,12 @@
 #define __lockfunc __attribute__((section(".spinlock.text")))

 /*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
  */
 #include <linux/spinlock_types.h>

 /*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
  */
 #ifdef CONFIG_SMP
 # include <asm/spinlock.h>
@@ -89,45 +89,31 @@
 #endif

 #ifdef CONFIG_DEBUG_SPINLOCK
-  extern void __spin_lock_init(spinlock_t *lock, const char *name,
-			       struct lock_class_key *key);
-# define spin_lock_init(lock)					\
+  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+				   struct lock_class_key *key);
+# define raw_spin_lock_init(lock)				\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	__spin_lock_init((lock), #lock, &__key);		\
+	__raw_spin_lock_init((lock), #lock, &__key);		\
 } while (0)

 #else
-# define spin_lock_init(lock)					\
-	do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock)				\
+	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif

-#ifdef CONFIG_DEBUG_SPINLOCK
-extern void __rwlock_init(rwlock_t *lock, const char *name,
-			  struct lock_class_key *key);
-# define rwlock_init(lock)					\
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	__rwlock_init((lock), #lock, &__key);			\
-} while (0)
-#else
-# define rwlock_init(lock)					\
-	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
-#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

 #ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
 #else

-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
 #else
-#define spin_is_contended(lock)	(((void)(lock), 0))
+#define raw_spin_is_contended(lock)	(((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#endif /*arch_spin_is_contended*/
 #endif

 /* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #endif

 /**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
  */
-#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
 #else
-# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
-		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
-		__raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
-		__raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
-#endif
-
-#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock)		__raw_write_can_lock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+	arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+	arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+	arch_spin_unlock(&lock->raw_lock);
+}
+#endif

 /*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
  */
-#define spin_trylock(lock)		__cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock)		__cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock)		__cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

-#define spin_lock(lock)			_spin_lock(lock)
+#define raw_spin_lock(lock)	_raw_spin_lock(lock)

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock)				\
+# define raw_spin_lock_nested(lock, subclass) \
+	_raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
-		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
+		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
 #else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass)		_raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
 #endif

-#define write_lock(lock)		_write_lock(lock)
-#define read_lock(lock)			_read_lock(lock)
-
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

-#define spin_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
	 do {						\
		 typecheck(unsigned long, flags);	\
-		 flags = _spin_lock_irqsave(lock);	\
-	 } while (0)
-#define read_lock_irqsave(lock, flags)			\
-	 do {						\
-		 typecheck(unsigned long, flags);	\
-		 flags = _read_lock_irqsave(lock);	\
-	 } while (0)
-#define write_lock_irqsave(lock, flags)			\
-	 do {						\
-		 typecheck(unsigned long, flags);	\
-		 flags = _write_lock_irqsave(lock);	\
+		 flags = _raw_spin_lock_irqsave(lock);	\
	 } while (0)

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	 do {								\
		 typecheck(unsigned long, flags);			\
-		 flags = _spin_lock_irqsave_nested(lock, subclass);	\
+		 flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	 } while (0)
 #else
-#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	 do {								\
		 typecheck(unsigned long, flags);			\
-		 flags = _spin_lock_irqsave(lock);			\
+		 flags = _raw_spin_lock_irqsave(lock);			\
	 } while (0)
 #endif

 #else

-#define spin_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
	 do {						\
		 typecheck(unsigned long, flags);	\
-		 _spin_lock_irqsave(lock, flags);	\
+		 _raw_spin_lock_irqsave(lock, flags);	\
	 } while (0)
-#define read_lock_irqsave(lock, flags)			\
-	 do {						\
-		 typecheck(unsigned long, flags);	\
-		 _read_lock_irqsave(lock, flags);	\
-	 } while (0)
-#define write_lock_irqsave(lock, flags)			\
-	 do {						\
-		 typecheck(unsigned long, flags);	\
-		 _write_lock_irqsave(lock, flags);	\
-	 } while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass)	\
-	 spin_lock_irqsave(lock, flags)
+
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
+	 raw_spin_lock_irqsave(lock, flags)

 #endif

-#define spin_lock_irq(lock)		_spin_lock_irq(lock)
-#define spin_lock_bh(lock)		_spin_lock_bh(lock)
-#define read_lock_irq(lock)		_read_lock_irq(lock)
-#define read_lock_bh(lock)		_read_lock_bh(lock)
-#define write_lock_irq(lock)		_write_lock_irq(lock)
-#define write_lock_bh(lock)		_write_lock_bh(lock)
-#define spin_unlock(lock)		_spin_unlock(lock)
-#define read_unlock(lock)		_read_unlock(lock)
-#define write_unlock(lock)		_write_unlock(lock)
-#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
-#define read_unlock_irq(lock)		_read_unlock_irq(lock)
-#define write_unlock_irq(lock)		_write_unlock_irq(lock)
+#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

-#define spin_unlock_irqrestore(lock, flags)		\
+#define raw_spin_unlock_irqrestore(lock, flags)		\
	 do {						\
		 typecheck(unsigned long, flags);	\
-		 _spin_unlock_irqrestore(lock, flags);	\
+		 _raw_spin_unlock_irqrestore(lock, flags);	\
	 } while (0)
-#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

-#define read_unlock_irqrestore(lock, flags)		\
-	 do {						\
-		 typecheck(unsigned long, flags);	\
-		 _read_unlock_irqrestore(lock, flags);	\
-	 } while (0)
-#define read_unlock_bh(lock)		_read_unlock_bh(lock)
+#define raw_spin_trylock_bh(lock) \
+	__cond_lock(lock, _raw_spin_trylock_bh(lock))

-#define write_unlock_irqrestore(lock, flags)		\
-	 do {						\
-		 typecheck(unsigned long, flags);	\
-		 _write_unlock_irqrestore(lock, flags);	\
-	 } while (0)
-#define write_unlock_bh(lock)		_write_unlock_bh(lock)
-
-#define spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
-
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
 ({ \
	 local_irq_disable(); \
-	 spin_trylock(lock) ? \
+	 raw_spin_trylock(lock) ? \
	 1 : ({ local_irq_enable(); 0;  }); \
 })

-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
	 local_irq_save(flags); \
-	 spin_trylock(lock) ? \
+	 raw_spin_trylock(lock) ? \
	 1 : ({ local_irq_restore(flags); 0; }); \
 })

-#define write_trylock_irqsave(lock, flags) \
-({ \
-	 local_irq_save(flags); \
-	 write_trylock(lock) ? \
-	 1 : ({ local_irq_restore(flags); 0; }); \
-})
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+	return &lock->rlock;
+}
+
+#define spin_lock_init(_lock)				\
+do {							\
+	spinlock_check(_lock);				\
+	raw_spin_lock_init(&(_lock)->rlock);		\
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+	raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+	return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass)			\
+do {								\
+	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+	raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags)				\
+do {								\
+	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+do {									\
+	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+	raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+	raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+	return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+	return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags)			\
+({ \
+	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+})
+
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+	raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+	return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+	return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+	assert_raw_spin_locked(&lock->rlock);
+}

 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock)	(!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #endif /* __LINUX_SPINLOCK_H */
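The net effect of the mapping layer, seen from a caller (a sketch, not part of the commit; both lock names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);	/* spinlock_t: the type PREEMPT_RT can
					 * later substitute with a sleeping lock */
static DEFINE_RAW_SPINLOCK(hw_lock);	/* raw_spinlock_t: always a busy-wait lock */

static void touch_both(void)
{
	unsigned long flags;

	spin_lock(&dev_lock);		/* forwards to raw_spin_lock(&dev_lock.rlock) */
	spin_unlock(&dev_lock);

	raw_spin_lock_irqsave(&hw_lock, flags);
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}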
@@ -17,165 +17,76 @@

 int in_lock_functions(unsigned long addr);

-#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

-void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
							__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
							__acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
							__acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-							__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-							__releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-							__releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
							__releases(lock);

 #ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
 #endif

 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
 #endif

-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
-#endif
-
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
	 preempt_disable();
-	 if (_raw_spin_trylock(lock)) {
+	 if (do_raw_spin_trylock(lock)) {
		 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		 return 1;
	 }
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
	 return 0;
 }

-static inline int __read_trylock(rwlock_t *lock)
-{
-	preempt_disable();
-	if (_raw_read_trylock(lock)) {
-		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-	preempt_enable();
-	return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
-	preempt_disable();
-	if (_raw_write_trylock(lock)) {
-		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-	preempt_enable();
-	return 0;
-}
-
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

-static inline void __read_lock(rwlock_t *lock)
-{
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 {
	 unsigned long flags;

@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
	 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	 /*
	  * On lockdep we dont want the hand-coded irq-enable of
-	  * _raw_spin_lock_flags() code, because lockdep assumes
+	  * do_raw_spin_lock_flags() code, because lockdep assumes
	  * that interrupts are not re-enabled during lock-acquire:
	  */
 #ifdef CONFIG_LOCKDEP
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 #else
-	_raw_spin_lock_flags(lock, &flags);
+	do_raw_spin_lock_flags(lock, &flags);
 #endif
	 return flags;
 }

-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
 {
	 local_irq_disable();
	 preempt_disable();
	 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }

-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
 {
	 local_bh_disable();
	 preempt_disable();
	 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }

-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
-			     _raw_read_lock_flags, &flags);
-	return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
-	local_irq_disable();
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
-	local_bh_disable();
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
-			     _raw_write_lock_flags, &flags);
-	return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
-	local_irq_disable();
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __write_lock_bh(rwlock_t *lock)
-{
-	local_bh_disable();
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
	 preempt_disable();
	 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline void __write_lock(rwlock_t *lock)
-{
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }

 #endif /* CONFIG_PREEMPT */

-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
	 spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
	 preempt_enable();
 }

-static inline void __write_unlock(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	preempt_enable();
-}
-
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
-					    unsigned long flags)
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
+					    unsigned long flags)
 {
	 spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
	 local_irq_restore(flags);
	 preempt_enable();
 }

-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
 {
	 spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
	 local_irq_enable();
	 preempt_enable();
 }

-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
	 spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
+	do_raw_spin_unlock(lock);
	 preempt_enable_no_resched();
	 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }

-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
-					     unsigned long flags)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
 {
	 local_bh_disable();
	 preempt_disable();
-	if (_raw_spin_trylock(lock)) {
+	if (do_raw_spin_trylock(lock)) {
		 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		 return 1;
	 }
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
	 return 0;
 }

+#include <linux/rwlock_api_smp.h>
+
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
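A sketch of the trylock pattern these helpers implement (hypothetical lock and function, not from the commit): preemption stays disabled only when the lock was actually taken.

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(fast_lock);	/* hypothetical */

static int try_fast_path(void)
{
	if (!raw_spin_trylock(&fast_lock))
		return -EBUSY;		/* contended: caller falls back */
	/* ... short critical section ... */
	raw_spin_unlock(&fast_lock);
	return 0;
}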
@@ -16,7 +16,7 @@

 #define in_lock_functions(ADDR)		0

-#define assert_spin_locked(lock)	do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock)	do { (void)(lock); } while (0)

 /*
  * In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
   do { preempt_enable(); __release(lock); (void)(lock); } while (0)

 #define __UNLOCK_BH(lock) \
-  do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+  do { preempt_enable_no_resched(); local_bh_enable(); \
+	  __release(lock); (void)(lock); } while (0)

 #define __UNLOCK_IRQ(lock) \
   do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
 #define __UNLOCK_IRQRESTORE(lock, flags) \
   do { local_irq_restore(flags); __UNLOCK(lock); } while (0)

-#define _spin_lock(lock)			__LOCK(lock)
-#define _spin_lock_nested(lock, subclass)	__LOCK(lock)
-#define _read_lock(lock)			__LOCK(lock)
-#define _write_lock(lock)			__LOCK(lock)
-#define _spin_lock_bh(lock)			__LOCK_BH(lock)
-#define _read_lock_bh(lock)			__LOCK_BH(lock)
-#define _write_lock_bh(lock)			__LOCK_BH(lock)
-#define _spin_lock_irq(lock)			__LOCK_IRQ(lock)
-#define _read_lock_irq(lock)			__LOCK_IRQ(lock)
-#define _write_lock_irq(lock)			__LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
-#define _read_lock_irqsave(lock, flags)		__LOCK_IRQSAVE(lock, flags)
-#define _write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock)			({ __LOCK(lock); 1; })
-#define _read_trylock(lock)			({ __LOCK(lock); 1; })
-#define _write_trylock(lock)			({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock)			({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock)			__UNLOCK(lock)
-#define _read_unlock(lock)			__UNLOCK(lock)
-#define _write_unlock(lock)			__UNLOCK(lock)
-#define _spin_unlock_bh(lock)			__UNLOCK_BH(lock)
-#define _write_unlock_bh(lock)			__UNLOCK_BH(lock)
-#define _read_unlock_bh(lock)			__UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock)			__UNLOCK_IRQ(lock)
-#define _read_unlock_irq(lock)			__UNLOCK_IRQ(lock)
-#define _write_unlock_irq(lock)			__UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
-#define _read_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
-#define _write_unlock_irqrestore(lock, flags)	__UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_spin_lock(lock)			__LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass)	__LOCK(lock)
+#define _raw_read_lock(lock)			__LOCK(lock)
+#define _raw_write_lock(lock)			__LOCK(lock)
+#define _raw_spin_lock_bh(lock)			__LOCK_BH(lock)
+#define _raw_read_lock_bh(lock)			__LOCK_BH(lock)
+#define _raw_write_lock_bh(lock)		__LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock)		__LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock)		__LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock)		__LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock)			({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock)			({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock)		({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock)		({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock)			__UNLOCK(lock)
+#define _raw_read_unlock(lock)			__UNLOCK(lock)
+#define _raw_write_unlock(lock)			__UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock)		__UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock)		__UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock)		__UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock)		__UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock)		__UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock)		__UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+	__UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+	__UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+	__UNLOCK_IRQRESTORE(lock, flags)

 #endif /* __LINUX_SPINLOCK_API_UP_H */
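On a UP, non-debug build the macros above compile the lock calls down to preemption control alone; a simplified sketch of what a locked section reduces to (not the verbatim expansion):

#include <linux/preempt.h>

static inline void up_lock_section_sketch(void)
{
	preempt_disable();	/* roughly what __LOCK(lock) boils down to */
	/* ... critical section: nothing else can run on this CPU ... */
	preempt_enable();	/* roughly what __UNLOCK(lock) boils down to */
}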
@@ -17,8 +17,8 @@

 #include <linux/lockdep.h>

-typedef struct {
-	raw_spinlock_t raw_lock;
+typedef struct raw_spinlock {
+	arch_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
	 unsigned int break_lock;
 #endif
@@ -29,26 +29,10 @@ typedef struct {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
	 struct lockdep_map dep_map;
 #endif
-} spinlock_t;
+} raw_spinlock_t;

 #define SPINLOCK_MAGIC		0xdead4ead

-typedef struct {
-	raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned int magic, owner_cpu;
-	void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC		0xdeaf1eed
-
 #define SPINLOCK_OWNER_INIT	((void *)-1L)

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
 # define SPIN_DEP_MAP_INIT(lockname)
 #endif

-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname)		\
+	.magic = SPINLOCK_MAGIC,		\
+	.owner_cpu = -1,			\
+	.owner = SPINLOCK_OWNER_INIT,
 #else
-# define RW_DEP_MAP_INIT(lockname)
+# define SPIN_DEBUG_INIT(lockname)
 #endif

-#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname)				\
-	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
-				.magic = SPINLOCK_MAGIC,		\
-				.owner = SPINLOCK_OWNER_INIT,		\
-				.owner_cpu = -1,			\
-				SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname)				\
-	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
-				.magic = RWLOCK_MAGIC,			\
-				.owner = SPINLOCK_OWNER_INIT,		\
-				.owner_cpu = -1,			\
-				RW_DEP_MAP_INIT(lockname) }
-#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
-				SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
-	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
-				RW_DEP_MAP_INIT(lockname) }
-#endif
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
+	{					\
+	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
+	SPIN_DEBUG_INIT(lockname)		\
+	SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
+	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+	union {
+		struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+		struct {
+			u8 __padding[LOCK_PADSIZE];
+			struct lockdep_map dep_map;
+		};
+#endif
+	};
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)

 /*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
  */
 #define SPIN_LOCK_UNLOCKED	__SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)

 #define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>

 #endif /* __LINUX_SPINLOCK_TYPES_H */
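A sketch of what the union layout buys (names hypothetical, not from the commit): spinlock_t wraps struct raw_spinlock, so the consolidated type costs no extra space and lock.rlock aliases the same bytes.

#include <linux/spinlock.h>

struct device_state {
	spinlock_t lock;	/* container for a struct raw_spinlock */
};

static struct device_state st = {
	/* static initializer; the lockdep class name becomes "st.lock" */
	.lock = __SPIN_LOCK_UNLOCKED(st.lock),
};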
@@ -16,22 +16,22 @@

 typedef struct {
	 volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }

 #else

-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;

-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }

 #endif

 typedef struct {
	 /* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;

-#define __RAW_RW_LOCK_UNLOCKED	{ }
+#define __ARCH_RW_LOCK_UNLOCKED	{ }

 #endif /* __LINUX_SPINLOCK_TYPES_UP_H */
@@ -18,21 +18,21 @@
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x)	((x)->slock == 0)
+#define arch_spin_is_locked(x)	((x)->slock == 0)
 
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
 }
 
 static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
 }
 
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	char oldval = lock->slock;
 
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return oldval > 0;
 }
 
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	lock->slock = 1;
 }
 
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 /*
  * Read-write spinlocks. No debug version.
  */
-#define __raw_read_lock(lock)		do { (void)(lock); } while (0)
-#define __raw_write_lock(lock)		do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock)	({ (void)(lock); 1; })
-#define __raw_write_trylock(lock)	({ (void)(lock); 1; })
-#define __raw_read_unlock(lock)	do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock)	do { (void)(lock); } while (0)
+#define arch_read_lock(lock)		do { (void)(lock); } while (0)
+#define arch_write_lock(lock)		do { (void)(lock); } while (0)
+#define arch_read_trylock(lock)	({ (void)(lock); 1; })
+#define arch_write_trylock(lock)	({ (void)(lock); 1; })
+#define arch_read_unlock(lock)		do { (void)(lock); } while (0)
+#define arch_write_unlock(lock)	do { (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock)	((void)(lock), 0)
+#define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock)		do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock)	do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
+# define arch_spin_lock(lock)		do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock)	do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock)	({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
-#define __raw_spin_is_contended(lock)	(((void)(lock), 0))
+#define arch_spin_is_contended(lock)	(((void)(lock), 0))
 
-#define __raw_read_can_lock(lock)	(((void)(lock), 1))
-#define __raw_write_can_lock(lock)	(((void)(lock), 1))
+#define arch_read_can_lock(lock)	(((void)(lock), 1))
+#define arch_write_can_lock(lock)	(((void)(lock), 1))
 
-#define __raw_spin_unlock_wait(lock) \
-		do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+		do { cpu_relax(); } while (arch_spin_is_locked(lock))
 
 #endif /* __LINUX_SPINLOCK_UP_H */
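The two headers above rename the architecture-level layer from __raw_*/raw_spinlock_t to arch_*/arch_spinlock_t, freeing the raw_ prefix for the new kernel-level type. Normal kernel code never calls this layer directly; a hedged sketch of what the renamed bottom layer looks like to its only caller, the raw_spin_*() wrappers (the variable name is invented):

#include <linux/spinlock.h>

static arch_spinlock_t demo_arch_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_arch_level(void)
{
	/* arch_spin_lock()/arch_spin_unlock() are what raw_spin_lock()
	 * ultimately expands to, after the preempt and lockdep wrappers. */
	arch_spin_lock(&demo_arch_lock);
	arch_spin_unlock(&demo_arch_lock);
}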
@@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code)
 	 * an exiting task cleaning up the robust pi futexes.
 	 */
 	smp_mb();
-	spin_unlock_wait(&tsk->pi_lock);
+	raw_spin_unlock_wait(&tsk->pi_lock);
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
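tsk->pi_lock is now a raw_spinlock_t, so do_exit() must use the raw variant of unlock_wait, which spins until any current holder has released the lock without ever acquiring it. A sketch of the idiom, assuming the 2.6.33-era API (raw_spin_unlock_wait() was removed from much later kernels):

#include <linux/sched.h>
#include <linux/spinlock.h>

static void demo_wait_for_pi_users(struct task_struct *tsk)
{
	/* Pairs with the barrier on the lock holder's side; then wait
	 * until nobody holds pi_lock, without taking it ourselves. */
	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);
}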
@@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 
 static void rt_mutex_init_task(struct task_struct *p)
 {
-	spin_lock_init(&p->pi_lock);
+	raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init(&p->pi_waiters, &p->pi_lock);
+	plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
 #endif
 }
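The pi_waiters plist is guarded by the now-raw pi_lock, and the plist debug code records which lock protects each head, hence the new plist_head_init_raw() variant from this series. A sketch of the paired initialization, assuming the 2.6.33-era plist API; the struct and names are invented:

#include <linux/plist.h>
#include <linux/spinlock.h>

struct demo_pi_state {
	raw_spinlock_t lock;
	struct plist_head waiters;
};

static void demo_pi_state_init(struct demo_pi_state *s)
{
	raw_spin_lock_init(&s->lock);
	/* Tell plist debugging that this head is guarded by a raw lock: */
	plist_head_init_raw(&s->waiters, &s->lock);
}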
@@ -403,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 	 * and has cleaned up the pi_state already
 	 */
 	if (pi_state->owner) {
-		spin_lock_irq(&pi_state->owner->pi_lock);
+		raw_spin_lock_irq(&pi_state->owner->pi_lock);
 		list_del_init(&pi_state->list);
-		spin_unlock_irq(&pi_state->owner->pi_lock);
+		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 
 		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
 	}
@@ -470,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr)
 	 * pi_state_list anymore, but we have to be careful
 	 * versus waiters unqueueing themselves:
 	 */
-	spin_lock_irq(&curr->pi_lock);
+	raw_spin_lock_irq(&curr->pi_lock);
 	while (!list_empty(head)) {
 
 		next = head->next;
 		pi_state = list_entry(next, struct futex_pi_state, list);
 		key = pi_state->key;
 		hb = hash_futex(&key);
-		spin_unlock_irq(&curr->pi_lock);
+		raw_spin_unlock_irq(&curr->pi_lock);
 
 		spin_lock(&hb->lock);
 
-		spin_lock_irq(&curr->pi_lock);
+		raw_spin_lock_irq(&curr->pi_lock);
 		/*
 		 * We dropped the pi-lock, so re-check whether this
 		 * task still owns the PI-state:
@@ -495,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr)
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		pi_state->owner = NULL;
-		spin_unlock_irq(&curr->pi_lock);
+		raw_spin_unlock_irq(&curr->pi_lock);
 
 		rt_mutex_unlock(&pi_state->pi_mutex);
 
 		spin_unlock(&hb->lock);
 
-		spin_lock_irq(&curr->pi_lock);
+		raw_spin_lock_irq(&curr->pi_lock);
 	}
-	spin_unlock_irq(&curr->pi_lock);
+	raw_spin_unlock_irq(&curr->pi_lock);
 }
 
 static int
@@ -558,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	 * change of the task flags, we do this protected by
 	 * p->pi_lock:
 	 */
-	spin_lock_irq(&p->pi_lock);
+	raw_spin_lock_irq(&p->pi_lock);
 	if (unlikely(p->flags & PF_EXITING)) {
 		/*
 		 * The task is on the way out. When PF_EXITPIDONE is
@@ -567,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 		 */
 		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
 
-		spin_unlock_irq(&p->pi_lock);
+		raw_spin_unlock_irq(&p->pi_lock);
 		put_task_struct(p);
 		return ret;
 	}
@@ -586,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &p->pi_state_list);
 	pi_state->owner = p;
-	spin_unlock_irq(&p->pi_lock);
+	raw_spin_unlock_irq(&p->pi_lock);
 
 	put_task_struct(p);
 
@@ -760,7 +760,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	if (!pi_state)
 		return -EINVAL;
 
-	spin_lock(&pi_state->pi_mutex.wait_lock);
+	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
 	/*
@@ -789,23 +789,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 		else if (curval != uval)
 			ret = -EINVAL;
 		if (ret) {
-			spin_unlock(&pi_state->pi_mutex.wait_lock);
+			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
 			return ret;
 		}
 	}
 
-	spin_lock_irq(&pi_state->owner->pi_lock);
+	raw_spin_lock_irq(&pi_state->owner->pi_lock);
 	WARN_ON(list_empty(&pi_state->list));
 	list_del_init(&pi_state->list);
-	spin_unlock_irq(&pi_state->owner->pi_lock);
+	raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 
-	spin_lock_irq(&new_owner->pi_lock);
+	raw_spin_lock_irq(&new_owner->pi_lock);
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &new_owner->pi_state_list);
 	pi_state->owner = new_owner;
-	spin_unlock_irq(&new_owner->pi_lock);
+	raw_spin_unlock_irq(&new_owner->pi_lock);
 
-	spin_unlock(&pi_state->pi_mutex.wait_lock);
+	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
 	rt_mutex_unlock(&pi_state->pi_mutex);
 
 	return 0;
@@ -1010,7 +1010,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 		plist_add(&q->list, &hb2->chain);
 		q->lock_ptr = &hb2->lock;
 #ifdef CONFIG_DEBUG_PI_LIST
-		q->list.plist.lock = &hb2->lock;
+		q->list.plist.spinlock = &hb2->lock;
 #endif
 	}
 	get_futex_key_refs(key2);
@@ -1046,7 +1046,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
 
 	q->lock_ptr = &hb->lock;
 #ifdef CONFIG_DEBUG_PI_LIST
-	q->list.plist.lock = &hb->lock;
+	q->list.plist.spinlock = &hb->lock;
 #endif
 
 	wake_up_state(q->task, TASK_NORMAL);
@@ -1394,7 +1394,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 
 	plist_node_init(&q->list, prio);
 #ifdef CONFIG_DEBUG_PI_LIST
-	q->list.plist.lock = &hb->lock;
+	q->list.plist.spinlock = &hb->lock;
 #endif
 	plist_add(&q->list, &hb->chain);
 	q->task = current;
@@ -1529,18 +1529,18 @@ retry:
 	 * itself.
 	 */
 	if (pi_state->owner != NULL) {
-		spin_lock_irq(&pi_state->owner->pi_lock);
+		raw_spin_lock_irq(&pi_state->owner->pi_lock);
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
-		spin_unlock_irq(&pi_state->owner->pi_lock);
+		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 	}
 
 	pi_state->owner = newowner;
 
-	spin_lock_irq(&newowner->pi_lock);
+	raw_spin_lock_irq(&newowner->pi_lock);
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &newowner->pi_state_list);
-	spin_unlock_irq(&newowner->pi_lock);
+	raw_spin_unlock_irq(&newowner->pi_lock);
 	return 0;
 
 	/*
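In exit_pi_state_list() above, ->pi_lock becomes raw while the hash-bucket lock stays an ordinary spinlock; since a raw lock must not be held while taking a possibly-sleeping lock on preempt-rt, the code drops pi_lock, takes hb->lock, then re-takes pi_lock and re-checks ownership. A simplified sketch of that ordering, with stand-in parameters rather than the futex code itself:

#include <linux/spinlock.h>

static void demo_handoff(raw_spinlock_t *pi_lock, spinlock_t *hb_lock)
{
	raw_spin_lock_irq(pi_lock);
	/* ... pick the next entry off the list ... */
	raw_spin_unlock_irq(pi_lock);

	spin_lock(hb_lock);		/* may sleep on preempt-rt */
	raw_spin_lock_irq(pi_lock);
	/* ... re-check: is the entry still ours? ... */
	raw_spin_unlock_irq(pi_lock);
	/* ... operate under the bucket lock ... */
	spin_unlock(hb_lock);
}

The re-check after re-taking pi_lock is what makes dropping it safe: another task may have unqueued itself in the window.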
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 	for (;;) {
 		base = timer->base;
 		if (likely(base != NULL)) {
-			spin_lock_irqsave(&base->cpu_base->lock, *flags);
+			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 			if (likely(base == timer->base))
 				return base;
 			/* The timer has migrated to another CPU: */
-			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 		}
 		cpu_relax();
 	}
@@ -208,13 +208,13 @@ again:
 
 		/* See the comment in lock_timer_base() */
 		timer->base = NULL;
-		spin_unlock(&base->cpu_base->lock);
-		spin_lock(&new_base->cpu_base->lock);
+		raw_spin_unlock(&base->cpu_base->lock);
+		raw_spin_lock(&new_base->cpu_base->lock);
 
 		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
 			cpu = this_cpu;
-			spin_unlock(&new_base->cpu_base->lock);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_unlock(&new_base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 			timer->base = base;
 			goto again;
 		}
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
 	struct hrtimer_clock_base *base = timer->base;
 
-	spin_lock_irqsave(&base->cpu_base->lock, *flags);
+	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 
 	return base;
 }
@@ -628,12 +628,12 @@ static void retrigger_next_event(void *arg)
 	base = &__get_cpu_var(hrtimer_bases);
 
 	/* Adjust CLOCK_REALTIME offset */
-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	base->clock_base[CLOCK_REALTIME].offset =
 		timespec_to_ktime(realtime_offset);
 
 	hrtimer_force_reprogram(base, 0);
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);
 }
 
 /*
@@ -694,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
 		if (wakeup) {
-			spin_unlock(&base->cpu_base->lock);
+			raw_spin_unlock(&base->cpu_base->lock);
 			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 		} else
 			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 
@@ -790,7 +790,7 @@ static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
 static inline
 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 }
 
 /**
@@ -1123,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&cpu_base->lock, flags);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!hrtimer_hres_active()) {
 		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1140,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&cpu_base->lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
 	if (mindelta.tv64 < 0)
 		mindelta.tv64 = 0;
@@ -1222,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 	 * they get migrated to another cpu, therefore its safe to unlock
 	 * the timer base.
 	 */
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 	trace_hrtimer_expire_entry(timer, now);
 	restart = fn(timer);
 	trace_hrtimer_expire_exit(timer);
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1261,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 retry:
 	expires_next.tv64 = KTIME_MAX;
 
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1317,7 +1317,7 @@ retry:
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 
 	/* Reprogramming necessary ? */
 	if (expires_next.tv64 == KTIME_MAX ||
@@ -1457,7 +1457,7 @@ void hrtimer_run_queues(void)
 			gettime = 0;
 		}
 
-		spin_lock(&cpu_base->lock);
+		raw_spin_lock(&cpu_base->lock);
 
 		while ((node = base->first)) {
 			struct hrtimer *timer;
@@ -1469,7 +1469,7 @@ void hrtimer_run_queues(void)
 
 			__run_hrtimer(timer, &base->softirq_time);
 		}
-		spin_unlock(&cpu_base->lock);
+		raw_spin_unlock(&cpu_base->lock);
 	}
 }
 
@@ -1625,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	spin_lock_init(&cpu_base->lock);
+	raw_spin_lock_init(&cpu_base->lock);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1683,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
 	 */
-	spin_lock(&new_base->lock);
-	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&new_base->lock);
+	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
 				     &new_base->clock_base[i]);
 	}
 
-	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
+	raw_spin_unlock(&old_base->lock);
+	raw_spin_unlock(&new_base->lock);
 
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
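lock_hrtimer_base() above keeps its retry loop through the conversion: a timer can migrate between the unlocked read of timer->base and acquiring the (now raw) base lock, so the base pointer must be re-validated under the lock. A simplified sketch of the pattern with invented types, not the hrtimer code itself:

#include <linux/spinlock.h>

struct demo_base {
	raw_spinlock_t lock;
};

struct demo_timer {
	struct demo_base *base;
};

static struct demo_base *demo_lock_base(struct demo_timer *t,
					unsigned long *flags)
{
	struct demo_base *base;

	for (;;) {
		base = t->base;
		raw_spin_lock_irqsave(&base->lock, *flags);
		if (likely(base == t->base))
			return base;	/* still ours, and now locked */
		/* migrated in the window: drop and retry */
		raw_spin_unlock_irqrestore(&base->lock, *flags);
		cpu_relax();
	}
}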
@@ -96,7 +96,7 @@ static int task_bp_pinned(struct task_struct *tsk)
 
 	list = &ctx->event_list;
 
-	spin_lock_irqsave(&ctx->lock, flags);
+	raw_spin_lock_irqsave(&ctx->lock, flags);
 
 	/*
	 * The current breakpoint counter is not included in the list
@@ -107,7 +107,7 @@ static int task_bp_pinned(struct task_struct *tsk)
 			count++;
 	}
 
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
 	return count;
 }
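task_bp_pinned() above walks a perf context list under the context lock; the irqsave form is used because the caller may run with interrupts either enabled or disabled. A generic sketch of the irqsave idiom on a raw lock (all names invented):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_ctx_lock);
static int demo_counter;

static int demo_read_counter(void)
{
	unsigned long flags;
	int v;

	/* Safe from any context: saves the current interrupt state
	 * and restores it, rather than unconditionally re-enabling. */
	raw_spin_lock_irqsave(&demo_ctx_lock, flags);
	v = demo_counter;
	raw_spin_unlock_irqrestore(&demo_ctx_lock, flags);

	return v;
}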
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
 	 * flush such a longstanding irq before considering it as spurious.
 	 */
 	for_each_irq_desc_reverse(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			/*
			 * An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
 				desc->chip->set_type(i, IRQ_TYPE_PROBE);
 			desc->chip->startup(i);
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 
 	/* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
 	 * happened in the previous stage, it may have masked itself)
 	 */
 	for_each_irq_desc_reverse(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
 			if (desc->chip->startup(i))
 				desc->status |= IRQ_PENDING;
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 
 	/*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
 	 * Now filter out any obviously spurious interrupts
 	 */
 	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		status = desc->status;
 
 		if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
 			if (i < 32)
 				mask |= 1 << i;
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 
 	return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val)
 	int i;
 
 	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		status = desc->status;
 
 		if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val)
 			desc->status = status & ~IRQ_AUTODETECT;
 			desc->chip->shutdown(i);
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 	mutex_unlock(&probing_active);
 
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
 	unsigned int status;
 
 	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		status = desc->status;
 
 		if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
 			desc->status = status & ~IRQ_AUTODETECT;
 			desc->chip->shutdown(i);
 		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 	mutex_unlock(&probing_active);
 
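probe_irq_on(), probe_irq_mask() and probe_irq_off() above all follow the same shape: iterate the descriptors and take each desc->lock, which is now raw so the per-descriptor critical sections stay truly atomic on preempt-rt. A reduced sketch of that per-descriptor scan with stand-in types:

#include <linux/spinlock.h>

struct demo_desc {
	raw_spinlock_t lock;
	unsigned int status;
};

static void demo_scan(struct demo_desc *descs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* one short raw critical section per descriptor */
		raw_spin_lock_irq(&descs[i].lock);
		descs[i].status &= ~0x1u;	/* e.g. clear a probe flag */
		raw_spin_unlock_irq(&descs[i].lock);
	}
}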
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
 	}
 
 	/* Ensure we don't have left over values from a previous use of this irq */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status = IRQ_DISABLED;
 	desc->chip = &no_irq_chip;
 	desc->handle_irq = handle_bad_irq;
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
 	cpumask_clear(desc->pending_mask);
 #endif
 #endif
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 /**
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (desc->action) {
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
 			irq);
 		return;
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	desc->chip = &no_irq_chip;
 	desc->name = NULL;
 	clear_kstat_irqs(desc);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 	if (!chip)
 		chip = &no_irq_chip;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	irq_chip_set_defaults(chip);
 	desc->chip = chip;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return 0;
 }
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
 	if (type == IRQ_TYPE_NONE)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	ret = __irq_set_trigger(desc, irq, type);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(set_irq_type);
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->handler_data = data;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 EXPORT_SYMBOL(set_irq_data);
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->msi_desc = entry;
 	if (entry)
 		entry->irq = irq;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->chip_data = data;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return 0;
 }
@@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int irq, int nest)
 	if (!desc)
 		return;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (nest)
 		desc->status |= IRQ_NESTED_THREAD;
 	else
 		desc->status &= ~IRQ_NESTED_THREAD;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL_GPL(set_irq_nested_thread);
 
@@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq)
 
 	might_sleep();
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 
 	kstat_incr_irqs_this_cpu(irq, desc);
 
@@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 
 	action_ret = action->thread_fn(action->irq, action->dev_id);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
 out_unlock:
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_nested_irq);
 
@@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
@@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
@@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc, irq);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
 	if (unlikely(desc->status & IRQ_ONESHOT))
@@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
 		desc->chip->unmask(irq);
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_level_irq);
 
@@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out;
@@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 
 	desc->status |= IRQ_INPROGRESS;
 	desc->status &= ~IRQ_PENDING;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 out:
 	desc->chip->eoi(irq);
 
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
@@ -530,7 +530,7 @@ out:
 void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 
@@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		}
 
 		desc->status &= ~IRQ_PENDING;
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 		action_ret = handle_IRQ_event(irq, action);
 		if (!noirqdebug)
 			note_interrupt(irq, desc, action_ret);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
 
 	desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
@@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	}
 
 	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	/* Uninstall? */
 	if (handle == handle_bad_irq) {
@@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		desc->depth = 0;
 		desc->chip->startup(irq);
 	}
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOPROBE;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void __init set_irq_probe(unsigned int irq)
@@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status &= ~IRQ_NOPROBE;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
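Every flow handler above shares one shape: take desc->lock, mark the descriptor IRQ_INPROGRESS, drop the lock across the action handler, then re-take it to clear the flag, so the handler itself never runs under the raw lock. A condensed sketch of that shape with invented names (the lock must be set up with raw_spin_lock_init() or DEFINE_RAW_SPINLOCK() before use):

#include <linux/spinlock.h>

struct demo_irq_desc {
	raw_spinlock_t lock;
	unsigned int status;
};

#define DEMO_INPROGRESS	0x1

static void demo_flow_handler(struct demo_irq_desc *desc,
			      void (*handler)(void))
{
	raw_spin_lock(&desc->lock);
	if (desc->status & DEMO_INPROGRESS)
		goto out;		/* re-entered: nothing to do */
	desc->status |= DEMO_INPROGRESS;
	raw_spin_unlock(&desc->lock);

	handler();			/* runs without desc->lock held */

	raw_spin_lock(&desc->lock);
	desc->status &= ~DEMO_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
}

The in-progress flag is what guards against re-entry while the lock is not held.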
@@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = {
 	.chip = &no_irq_chip,
 	.handle_irq = handle_bad_irq,
 	.depth = 1,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 };
 
 void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 {
 	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
 
-	spin_lock_init(&desc->lock);
+	raw_spin_lock_init(&desc->lock);
 	desc->irq = irq;
 #ifdef CONFIG_SMP
 	desc->node = node;
@@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 /*
  * Protect the sparse_irqs:
  */
-DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
 
 struct irq_desc **irq_desc_ptrs __read_mostly;
 
@@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.chip = &no_irq_chip,
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
-		.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 	}
 };
 
@@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 	if (desc)
 		return desc;
 
-	spin_lock_irqsave(&sparse_irq_lock, flags);
+	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
 	/* We have to check it to avoid races with another CPU */
 	desc = irq_desc_ptrs[irq];
@@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 	irq_desc_ptrs[irq] = desc;
 
 out_unlock:
-	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	return desc;
 }
@@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.chip = &no_irq_chip,
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
-		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
 	}
 };
 
@@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq)
 		return 1;
 	}
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
 	/*
@@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq)
 	for (;;) {
 		irqreturn_t action_ret;
 
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 
 		action_ret = handle_IRQ_event(irq, action);
 		if (!noirqdebug)
 			note_interrupt(irq, desc, action_ret);
 
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 		if (likely(!(desc->status & IRQ_PENDING)))
 			break;
 		desc->status &= ~IRQ_PENDING;
@@ -536,7 +536,7 @@ out:
 	 * disabled while the handler was running.
 	 */
 	desc->chip->end(irq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	return 1;
 }
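The static irq_desc initializers above must switch to the raw initializer, since __SPIN_LOCK_UNLOCKED() now produces a spinlock_t and would no longer match the raw field type. A sketch of the same pattern for any static struct embedding a raw lock (names invented):

#include <linux/spinlock.h>

struct demo_entry {
	raw_spinlock_t lock;
	unsigned int depth;
};

/* The raw initializer matches the raw field type; the argument names
 * the lock for lockdep. */
static struct demo_entry demo_entry = {
	.lock  = __RAW_SPIN_LOCK_UNLOCKED(demo_entry.lock),
	.depth = 1,
};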
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
-extern spinlock_t sparse_irq_lock;
+extern raw_spinlock_t sparse_irq_lock;
 
 #ifdef CONFIG_SPARSE_IRQ
 /* irq_desc_ptrs allocated at boot time */
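sparse_irq_lock is defined with DEFINE_RAW_SPINLOCK() in the hunk further up, so its extern declaration has to change type in lockstep or the build breaks. A sketch of the pairing (names invented):

#include <linux/spinlock.h>

/* header side: the declaration must use the raw type ... */
extern raw_spinlock_t demo_shared_lock;

/* ... and exactly one .c file provides the matching definition: */
DEFINE_RAW_SPINLOCK(demo_shared_lock);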
Some files were not shown because too many files have changed in this diff.