mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

While the GCC and Clang compilers already define __ASSEMBLER__ automatically when compiling assembly code, __ASSEMBLY__ is a macro that only gets defined by the Makefiles in the kernel. This can be very confusing when switching between userspace and kernelspace coding, or when dealing with uapi headers that rather should use __ASSEMBLER__ instead. So let's standardize on the __ASSEMBLER__ macro that is provided by the compilers now. This is mostly a completely mechanical patch (done with a simple "sed -i" statement), except for some manual tweaks in the files arch/parisc/include/asm/smp.h, arch/parisc/include/asm/signal.h, arch/parisc/include/asm/thread_info.h and arch/parisc/include/asm/vdso.h that had the macro spelled in a wrong way. Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> Cc: Helge Deller <deller@gmx.de> Cc: linux-parisc@vger.kernel.org Signed-off-by: Thomas Huth <thuth@redhat.com> Signed-off-by: Helge Deller <deller@gmx.de>
97 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/alternative.h>

#ifndef __ASSEMBLER__

/*
 * The synchronize caches instruction ("sync") executes as a nop on
 * systems in which all memory references are performed in order.
 * Via ALTERNATIVE(), the instruction is patched to a plain nop at
 * boot when the kernel runs on a non-SMP machine (ALT_COND_NO_SMP),
 * so only SMP systems pay for the real barrier.
 */
#define synchronize_caches() asm volatile("sync" \
	ALTERNATIVE(ALT_COND_NO_SMP, INSN_NOP) \
	: : : "memory")
/*
 * Full memory barriers: on SMP kernels these expand to the (runtime
 * patchable) "sync" instruction via synchronize_caches(); UP kernels
 * only need a compiler barrier, since a single CPU observes its own
 * accesses in program order.
 */
#if defined(CONFIG_SMP)
#define mb()		do { synchronize_caches(); } while (0)
#define rmb()		mb()
#define wmb()		mb()
#define dma_rmb()	mb()
#define dma_wmb()	mb()
#else
#define mb()		barrier()
#define rmb()		barrier()
#define wmb()		barrier()
#define dma_rmb()	barrier()
#define dma_wmb()	barrier()
#endif
/*
 * parisc provides no cheaper SMP-only ordering primitive here, so the
 * __smp_* variants all fall back to the full mb() defined above.
 */
#define __smp_mb()	mb()
#define __smp_rmb()	mb()
#define __smp_wmb()	mb()
/*
 * __smp_store_release(p, v): store v to *p with release semantics.
 *
 * The value is funnelled through a union so the asm can read its raw
 * bytes independently of the type of *p.  The store uses the ",ma"
 * completer with a zero displacement, which gives the access ordered
 * semantics on parisc — NOTE(review): confirm against the PA-RISC 2.0
 * ISA description of ordered accesses.
 * Sizes 1, 2 and 4 are always handled; 8-byte stores are emitted only
 * on CONFIG_64BIT kernels (other sizes fail compiletime_assert_atomic_type).
 */
#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { typeof(*p) __val; char __c[1]; } __u =			\
		{ .__val = (__force typeof(*p)) (v) };			\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile("stb,ma %0,0(%1)"				\
				: : "r"(*(__u8 *)__u.__c), "r"(__p)	\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile("sth,ma %0,0(%1)"				\
				: : "r"(*(__u16 *)__u.__c), "r"(__p)	\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile("stw,ma %0,0(%1)"				\
				: : "r"(*(__u32 *)__u.__c), "r"(__p)	\
				: "memory");				\
		break;							\
	case 8:								\
		if (IS_ENABLED(CONFIG_64BIT))				\
			asm volatile("std,ma %0,0(%1)"			\
				: : "r"(*(__u64 *)__u.__c), "r"(__p)	\
				: "memory");				\
		break;							\
	}								\
} while (0)
/*
 * __smp_load_acquire(p): load *p with acquire semantics and yield the
 * value as the expression result (statement-expression).
 *
 * Mirror image of __smp_store_release(): the bytes are loaded into a
 * union via an ",ma"-completed load with zero displacement (ordered
 * access — NOTE(review): confirm against the PA-RISC 2.0 ISA), then
 * returned as typeof(*p).  8-byte loads only on CONFIG_64BIT kernels.
 */
#define __smp_load_acquire(p)						\
({									\
	union { typeof(*p) __val; char __c[1]; } __u;			\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile("ldb,ma 0(%1),%0"				\
				: "=r"(*(__u8 *)__u.__c) : "r"(__p)	\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile("ldh,ma 0(%1),%0"				\
				: "=r"(*(__u16 *)__u.__c) : "r"(__p)	\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile("ldw,ma 0(%1),%0"				\
				: "=r"(*(__u32 *)__u.__c) : "r"(__p)	\
				: "memory");				\
		break;							\
	case 8:								\
		if (IS_ENABLED(CONFIG_64BIT))				\
			asm volatile("ldd,ma 0(%1),%0"			\
				: "=r"(*(__u64 *)__u.__c) : "r"(__p)	\
				: "memory");				\
		break;							\
	}								\
	__u.__val;							\
})

#include <asm-generic/barrier.h>

#endif /* !__ASSEMBLER__ */
#endif /* __ASM_BARRIER_H */