mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00
locking/atomic/x86: Use 's64 *' for 'old' argument of atomic64_try_cmpxchg()
atomic64_try_cmpxchg() declares the 'old' argument as 'long *', which makes it impossible to use in portable code. If the caller passes 'long *', it becomes 32 bits on 32-bit arches. If the caller passes 's64 *', it does not compile on x86_64. Change the type of the 'old' argument to 's64 *' instead. Signed-off-by: Dmitry Vyukov <dvyukov@google.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Will Deacon <will.deacon@arm.com> Cc: kasan-dev@googlegroups.com Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/fa6f77f2375150d26ea796a77e8b59195fd2ab13.1497690003.git.dvyukov@google.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
ba1c9f83f6
commit
007d185b44
2 changed files with 7 additions and 7 deletions
|
@ -177,7 +177,7 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
|
|||
}
|
||||
|
||||
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
|
||||
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
|
||||
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
|
||||
{
|
||||
return try_cmpxchg(&v->counter, old, new);
|
||||
}
|
||||
|
@ -198,7 +198,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
|
|||
*/
|
||||
static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
|
||||
{
|
||||
long c = atomic64_read(v);
|
||||
s64 c = atomic64_read(v);
|
||||
do {
|
||||
if (unlikely(c == u))
|
||||
return false;
|
||||
|
@ -217,7 +217,7 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
|
|||
*/
|
||||
static inline long atomic64_dec_if_positive(atomic64_t *v)
|
||||
{
|
||||
long dec, c = atomic64_read(v);
|
||||
s64 dec, c = atomic64_read(v);
|
||||
do {
|
||||
dec = c - 1;
|
||||
if (unlikely(dec < 0))
|
||||
|
@ -236,7 +236,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
|
|||
|
||||
static inline long atomic64_fetch_and(long i, atomic64_t *v)
|
||||
{
|
||||
long val = atomic64_read(v);
|
||||
s64 val = atomic64_read(v);
|
||||
|
||||
do {
|
||||
} while (!atomic64_try_cmpxchg(v, &val, val & i));
|
||||
|
@ -253,7 +253,7 @@ static inline void atomic64_or(long i, atomic64_t *v)
|
|||
|
||||
static inline long atomic64_fetch_or(long i, atomic64_t *v)
|
||||
{
|
||||
long val = atomic64_read(v);
|
||||
s64 val = atomic64_read(v);
|
||||
|
||||
do {
|
||||
} while (!atomic64_try_cmpxchg(v, &val, val | i));
|
||||
|
@ -270,7 +270,7 @@ static inline void atomic64_xor(long i, atomic64_t *v)
|
|||
|
||||
static inline long atomic64_fetch_xor(long i, atomic64_t *v)
|
||||
{
|
||||
long val = atomic64_read(v);
|
||||
s64 val = atomic64_read(v);
|
||||
|
||||
do {
|
||||
} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
|
||||
|
|
|
@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
|
|||
#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock) \
|
||||
({ \
|
||||
bool success; \
|
||||
__typeof__(_ptr) _old = (_pold); \
|
||||
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
|
||||
__typeof__(*(_ptr)) __old = *_old; \
|
||||
__typeof__(*(_ptr)) __new = (_new); \
|
||||
switch (size) { \
|
||||
|
|
Loading…
Add table
Reference in a new issue