	[PATCH] atomic: cmpxchg
Introduce an atomic_cmpxchg operation.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 4a6dae6d38
parent 53e86b91b7

23 changed files with 169 additions and 5 deletions

@@ -115,6 +115,21 @@ boolean is return which indicates whether the resulting counter value
is negative.  It requires explicit memory barrier semantics around the
operation.

Finally:

	int atomic_cmpxchg(atomic_t *v, int old, int new);

This performs an atomic compare exchange operation on the atomic value v,
with the given old and new values. Like all atomic_xxx operations,
atomic_cmpxchg will only satisfy its atomicity semantics as long as all
other accesses of *v are performed through atomic_xxx operations.

atomic_cmpxchg requires explicit memory barriers around the operation.

The semantics for atomic_cmpxchg are the same as those defined for 'cas'
below.


If a caller requires memory barrier semantics around an atomic_t
operation which does not return a value, a set of interfaces are
defined which accomplish this:

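The return value is the contents of *v observed at the time of the operation, so a caller knows its store happened exactly when the return value equals old. A minimal sketch of the usual retry-loop idiom built on top of atomic_cmpxchg (the wrapper name and its give-up policy are illustrative, not part of this patch):

	/*
	 * Illustrative only: add 1 to v unless it currently holds 'forbidden'.
	 * Returns nonzero if the increment was performed.
	 */
	static inline int example_inc_unless(atomic_t *v, int forbidden)
	{
		int cur = atomic_read(v);

		for (;;) {
			int seen;

			if (cur == forbidden)
				return 0;	/* give up without storing */
			seen = atomic_cmpxchg(v, cur, cur + 1);
			if (seen == cur)
				return 1;	/* compare matched, store done */
			cur = seen;		/* lost a race, retry with fresh value */
		}
	}
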
@@ -37,17 +37,28 @@ int __atomic_add_return(int i, atomic_t *v)
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(__atomic_add_return);

int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}

void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);

	v->counter = i;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}

EXPORT_SYMBOL(__atomic_add_return);
EXPORT_SYMBOL(atomic_set);

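For context: this hunk is the sparc32 library implementation (the matching extern declaration appears in a header hunk further down). SPARC v8 has no compare-and-swap instruction, so the atomic_t helpers serialize through a small hashed array of spinlocks, with ATOMIC_HASH() picking a lock from the atomic_t's address. A rough sketch of that scheme, using illustrative names and sizes rather than the file's actual definitions:

	/* Illustrative only: the real table, size and shift live in this file. */
	#define EXAMPLE_HASH_SIZE	4
	extern spinlock_t example_hash[EXAMPLE_HASH_SIZE];

	/* Map an atomic_t's address to one of a few shared spinlocks. */
	#define EXAMPLE_ATOMIC_HASH(v) \
		(&example_hash[((unsigned long)(v) >> 4) & (EXAMPLE_HASH_SIZE - 1)])
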
@@ -177,6 +177,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
	return result;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

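On architectures that already provide a word-sized cmpxchg() primitive, as above, the new operation reduces to a one-line macro; the (int) cast keeps the result at the counter's type however the underlying cmpxchg() happens to be typed. Written out as a function, the macro is equivalent to something like this (illustrative form only):

	/* Illustrative expansion of the one-line macro above. */
	static inline int example_atomic_cmpxchg(atomic_t *v, int old, int new)
	{
		return (int)cmpxchg(&v->counter, old, new);
	}
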
@@ -80,6 +80,23 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	u32 oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;
@@ -131,6 +148,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

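Of the two ARM hunks above, the first is the ARMv6 SMP variant: ldrex marks the word for exclusive access, teq compares it with the expected value, and strexeq attempts the store only if they matched, setting res nonzero when another agent touched the word in between, which forces a retry. The second hunk is the pre-v6 fallback, which disables interrupts instead. A C-flavoured model of the intended LL/SC control flow, purely as an illustration (load_exclusive/store_exclusive are hypothetical helpers, not kernel APIs):

	/* Pseudo-C model of the ldrex/teq/strexeq loop; not compilable kernel code. */
	static inline int llsc_cmpxchg_model(atomic_t *ptr, int old, int new)
	{
		int oldval, failed;

		do {
			oldval = load_exclusive(&ptr->counter);	/* ldrex */
			if (oldval == old)
				failed = store_exclusive(&ptr->counter, new);	/* strexeq */
			else
				failed = 0;	/* no store attempted; loop exits */
		} while (failed);

		return oldval;
	}
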
@@ -62,6 +62,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

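This interrupt-disabling shape recurs, with minor variations, in the uniprocessor-only ports that follow. It is sound there because, with a single CPU, the only agent that can intervene between the read and the conditional store is an interrupt handler, and local_irq_save() closes that window. To see why the irq disabling is load-bearing, here is a deliberately broken sketch of the same function without it (illustrative only):

	/* Broken on purpose: shows the lost update local_irq_save() prevents. */
	static inline int broken_cmpxchg(atomic_t *v, int old, int new)
	{
		int ret = v->counter;	/* an interrupt here may modify v->counter... */

		if (ret == old)
			v->counter = new;	/* ...and this store silently discards that update */
		return ret;
	}
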
@@ -123,6 +123,19 @@ static inline int atomic_inc_and_test(volatile atomic_t *v)
	return retval;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	cris_atomic_save(v, flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	cris_atomic_restore(v, flags);
	return ret;
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()    barrier()
#define smp_mb__after_atomic_dec()     barrier()

@@ -414,4 +414,6 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#endif

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

#endif /* _ASM_ATOMIC_H */

@@ -82,6 +82,19 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
	return ret == 0;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);
	return ret;
}

static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("stc ccr,r1l\n\t"

@@ -215,6 +215,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
	return atomic_add_return(-i,v);
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))

@@ -88,6 +88,8 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
	return new;
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\

@@ -139,6 +139,8 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()

@@ -128,6 +128,8 @@ static inline int atomic_sub_return(int i, atomic_t * v)
	return temp;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

@@ -287,6 +287,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
	return result;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

@@ -164,6 +164,7 @@ static __inline__ int atomic_read(const atomic_t *v)
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))

@@ -164,6 +164,8 @@ static __inline__ int atomic_dec_return(atomic_t *v)
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

@@ -198,6 +198,8 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
        return retval;
}

#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()

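s390 reuses its pre-existing atomic_compare_and_swap(), whose signature (visible in the hunk header above) takes the expected and new values before the pointer; the macro therefore permutes atomic_cmpxchg()'s (v, old, new) argument order. A hypothetical call written both ways, for illustration:

	/* Illustrative: the two calls request the same compare-and-swap on 'a'. */
	static inline void example_s390_equivalence(atomic_t *a)
	{
		(void)atomic_cmpxchg(a, 1, 2);				/* new common interface */
		(void)atomic_compare_and_swap(1, 2, &a->counter);	/* macro expansion */
	}
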
@@ -87,6 +87,20 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

@@ -99,6 +99,20 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

@@ -19,6 +19,7 @@ typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i)  { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)          ((v)->counter)

@@ -70,6 +70,8 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec()	membar_storeload_loadload();

@@ -90,6 +90,20 @@ static __inline__ void atomic_clear_mask (unsigned long mask, unsigned long *add
#define atomic_dec_and_test(v)		(atomic_sub_return (1, (v)) == 0)
#define atomic_add_negative(i,v)	(atomic_add_return ((i), (v)) < 0)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()

@@ -360,6 +360,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
	return atomic_add_return(-i,v);
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))

@@ -223,6 +223,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{