Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

While __atomic_add_unless() was originally intended as a building-block
for atomic_add_unless(), it's now used in a number of places around the
kernel. It's the only common atomic operation named __atomic*(), rather
than atomic_*(), and for consistency it would be better named
atomic_fetch_add_unless().

This lack of consistency is slightly confusing, and gets in the way of
scripting atomics. Given that, let's clean things up and promote it to
an official part of the atomics API, in the form of
atomic_fetch_add_unless().

This patch converts definitions and invocations over to the new name,
including the instrumented version, using the following script:

----
git grep -w __atomic_add_unless | while read line; do
	sed -i '{s/\<__atomic_add_unless\>/atomic_fetch_add_unless/}' "${line%%:*}";
done
git grep -w __arch_atomic_add_unless | while read line; do
	sed -i '{s/\<__arch_atomic_add_unless\>/arch_atomic_fetch_add_unless/}' "${line%%:*}";
done
----

Note that we do not have atomic{64,_long}_fetch_add_unless(), which
will be introduced by later patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-2-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
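For context, the fetch_* naming follows the convention that the
operation returns the value the atomic variable held *before* the
update. A minimal sketch of how atomic_add_unless() is then built on
top of the renamed primitive, as done generically in <linux/atomic.h>:

----
/*
 * Sketch: add @a to @v, unless @v is already @u.
 * Since atomic_fetch_add_unless() returns the old value, comparing it
 * against @u tells the caller whether the addition actually happened.
 */
static inline bool atomic_add_unless(atomic_t *v, int a, int u)
{
	return atomic_fetch_add_unless(v, a, u) != u;
}
----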
110 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/irqflags.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)          { (i) }

#define atomic_read(v)          READ_ONCE((v)->counter)
#define atomic_set(v, i)        WRITE_ONCE(((v)->counter), (i))

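/*
 * The H8/300 is a uniprocessor architecture, so the operations below
 * achieve atomicity simply by disabling interrupts around each
 * read-modify-write sequence; no SMP barriers or locked instructions
 * are needed.
 */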
#define ATOMIC_OP_RETURN(op, c_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)     \
{                                                               \
        h8300flags flags;                                       \
        int ret;                                                \
                                                                \
        flags = arch_local_irq_save();                          \
        ret = v->counter c_op i;                                \
        arch_local_irq_restore(flags);                          \
        return ret;                                             \
}

#define ATOMIC_FETCH_OP(op, c_op)                               \
static inline int atomic_fetch_##op(int i, atomic_t *v)        \
{                                                               \
        h8300flags flags;                                       \
        int ret;                                                \
                                                                \
        flags = arch_local_irq_save();                          \
        ret = v->counter;                                       \
        v->counter c_op i;                                      \
        arch_local_irq_restore(flags);                          \
        return ret;                                             \
}

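/*
 * Note: the _return variants above yield the new value (the assignment
 * expression "v->counter c_op i" evaluates to the updated counter),
 * whereas the fetch_ variants return the value read before the update.
 */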
#define ATOMIC_OP(op, c_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)             \
{                                                               \
        h8300flags flags;                                       \
                                                                \
        flags = arch_local_irq_save();                          \
        v->counter c_op i;                                      \
        arch_local_irq_restore(flags);                          \
}

ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP_RETURN(sub, -=)

#define ATOMIC_OPS(op, c_op)                                    \
        ATOMIC_OP(op, c_op)                                     \
        ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

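/*
 * The instantiations above generate atomic_add_return() and
 * atomic_sub_return() plus, via ATOMIC_OPS(), both the void and the
 * fetch_ variants of add, sub, and, or and xor.
 */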
#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)

#define atomic_inc_return(v)            atomic_add_return(1, v)
#define atomic_dec_return(v)            atomic_sub_return(1, v)

#define atomic_inc(v)                   (void)atomic_inc_return(v)
#define atomic_inc_and_test(v)          (atomic_inc_return(v) == 0)

#define atomic_dec(v)                   (void)atomic_dec_return(v)
#define atomic_dec_and_test(v)          (atomic_dec_return(v) == 0)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        h8300flags flags;

        flags = arch_local_irq_save();
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        arch_local_irq_restore(flags);
        return ret;
}

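/*
 * atomic_cmpxchg() returns the value observed before any store, so a
 * caller detects success by comparing the return value against @old.
 */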
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        h8300flags flags;

        flags = arch_local_irq_save();
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        arch_local_irq_restore(flags);
        return ret;
}

#endif /* __ARCH_H8300_ATOMIC__ */
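As a usage sketch (hypothetical helper, not part of the file above):
the common "take a reference only while the count is non-zero" pattern
maps directly onto this primitive:

----
/* Hypothetical example: returns true if a reference was taken. */
static inline bool get_ref_unless_zero(atomic_t *refcount)
{
	return atomic_fetch_add_unless(refcount, 1, 0) != 0;
}
----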