[PATCH] atomic: cmpxchg

Introduce an atomic_cmpxchg operation.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:    Nick Piggin <npiggin@suse.de>, 2005-11-13 16:07:24 -08:00
Committer: Linus Torvalds <torvalds@osdl.org>
commit 4a6dae6d38
parent 53e86b91b7
23 files changed, 169 insertions(+), 5 deletions(-)

@ -115,6 +115,21 @@ boolean is return which indicates whether the resulting counter value
is negative. It requires explicit memory barrier semantics around the
operation.

Finally:

	int atomic_cmpxchg(atomic_t *v, int old, int new);

This performs an atomic compare exchange operation on the atomic value v,
with the given old and new values. Like all atomic_xxx operations,
atomic_cmpxchg will only satisfy its atomicity semantics as long as all
other accesses of *v are performed through atomic_xxx operations.

atomic_cmpxchg requires explicit memory barriers around the operation.

The semantics for atomic_cmpxchg are the same as those defined for 'cas'
below.

If a caller requires memory barrier semantics around an atomic_t
operation which does not return a value, a set of interfaces are
defined which accomplish this:
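
As a usage illustration (not part of this patch), callers typically wrap
atomic_cmpxchg() in a read/compare/retry loop. The sketch below shows a
hypothetical inc_not_zero() helper that increments a counter only while it
is still non-zero, relying only on the interfaces documented above:

	/*
	 * Hedged sketch, not from this patch: the name inc_not_zero() is
	 * hypothetical; it uses only atomic_read() and atomic_cmpxchg().
	 */
	static inline int inc_not_zero(atomic_t *v)
	{
		int old;

		for (;;) {
			old = atomic_read(v);
			if (old == 0)
				return 0;	/* counter already dropped to zero */
			if (atomic_cmpxchg(v, old, old + 1) == old)
				return 1;	/* our compare-exchange won the race */
			/* *v changed between the read and the cmpxchg; retry */
		}
	}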

@ -37,17 +37,28 @@ int __atomic_add_return(int i, atomic_t *v)
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_return);

int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}

void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(__atomic_add_return);
EXPORT_SYMBOL(atomic_set);
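
Since sparc32 lacks a usable compare-and-swap instruction, the atomic_t
operations in this file are serialized through a small hash of spinlocks:
atomic_cmpxchg() takes the same per-counter lock as __atomic_add_return(),
so it is atomic only with respect to other atomic_xxx accesses, exactly as
the documentation hunk above warns. As a rough sketch of the idea (the real
array name and constants in the sparc code may differ), the hashing looks
something like this:

	/* Illustrative sketch only; the real names/constants may differ. */
	#define ATOMIC_HASH_SIZE	4
	static spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];

	#define ATOMIC_HASH(v) \
		(&__atomic_hash[(((unsigned long)(v)) >> 8) & (ATOMIC_HASH_SIZE - 1)])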

@ -177,6 +177,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
	return result;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

@ -80,6 +80,23 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	u32 oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;
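
For readers less familiar with ARMv6 exclusive accesses, the ldrex/strexeq
loop above behaves roughly like the C sketch below. This is an illustration
only, not part of the patch: store_exclusive() is a hypothetical stand-in
for strexeq (a store that succeeds, returning 0, only while the exclusive
monitor armed by ldrex is still held), and the real atomicity guarantee
comes from that hardware monitor rather than from this C.

	/* Illustrative C model of the loop; store_exclusive() is hypothetical. */
	static inline int atomic_cmpxchg_model(atomic_t *ptr, int old, int new)
	{
		int oldval, failed;

		do {
			failed = 0;
			oldval = ptr->counter;	/* ldrex: load and arm the monitor */
			if (oldval == old)	/* teq */
				failed = store_exclusive(&ptr->counter, new);	/* strexeq */
		} while (failed);		/* another CPU intervened: retry */

		return oldval;
	}
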
@ -131,6 +148,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

@ -62,6 +62,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

@ -123,6 +123,19 @@ static inline int atomic_inc_and_test(volatile atomic_t *v)
	return retval;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	cris_atomic_save(v, flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	cris_atomic_restore(v, flags);

	return ret;
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()

@ -414,4 +414,6 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
#endif

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

#endif /* _ASM_ATOMIC_H */

@ -82,6 +82,19 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
	return ret == 0;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("stc ccr,r1l\n\t"

@ -215,6 +215,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
	return atomic_add_return(-i,v);
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))

@ -88,6 +88,8 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
	return new;
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

#define atomic_add_return(i,v) \
({ \
	int __ia64_aar_i = (i); \

@ -139,6 +139,8 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()

@ -128,6 +128,8 @@ static inline int atomic_sub_return(int i, atomic_t * v)
	return temp;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

@ -287,6 +287,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
	return result;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

@ -164,6 +164,7 @@ static __inline__ int atomic_read(const atomic_t *v)
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v))))

@ -164,6 +164,8 @@ static __inline__ int atomic_dec_return(atomic_t *v)
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)

@ -198,6 +198,8 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
	return retval;
}

#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))

#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()

@ -87,6 +87,20 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

@ -99,6 +99,20 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

@ -19,6 +19,7 @@ typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v) ((v)->counter)

@ -70,6 +70,8 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec() membar_storeload_loadload();

@ -90,6 +90,20 @@ static __inline__ void atomic_clear_mask (unsigned long mask, unsigned long *add
#define atomic_dec_and_test(v) (atomic_sub_return (1, (v)) == 0)
#define atomic_add_negative(i,v) (atomic_add_return ((i), (v)) < 0)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()

@ -360,6 +360,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
	return atomic_add_return(-i,v);
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))

@ -223,6 +223,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{