mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

While the GCC and Clang compilers already define __ASSEMBLER__ automatically when compiling assembler code, __ASSEMBLY__ is a macro that only gets defined by the Makefiles in the kernel.

This is bad since macros starting with two underscores are names that are reserved by the C language. It can also be very confusing for developers when switching between userspace and kernelspace coding, or when dealing with uapi headers, which should rather use __ASSEMBLER__ instead. So let's now standardize on the __ASSEMBLER__ macro that is provided by the compilers.

This is almost a completely mechanical patch (done with a simple "sed -i" statement), with one comment tweaked manually in the arch/loongarch/include/asm/cpu.h file (it was missing the trailing underscores).

Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
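To make the change concrete, here is a minimal sketch of the guard pattern the patch standardizes on. The header fragment and the example_helper() name are hypothetical illustrations, not taken from the actual series:

/* Before: __ASSEMBLY__ only works because the kernel Makefiles pass -D__ASSEMBLY__
 * when assembling .S files */
#ifndef __ASSEMBLY__
static inline void example_helper(void) { }	/* hypothetical helper */
#endif /* __ASSEMBLY__ */

/* After: __ASSEMBLER__ is predefined by GCC and Clang themselves whenever they
 * preprocess assembler input, so no Makefile support is needed */
#ifndef __ASSEMBLER__
static inline void example_helper(void) { }	/* hypothetical helper */
#endif /* __ASSEMBLER__ */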
85 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLER__

#include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/loongarch.h>

static inline void arch_local_irq_enable(void)
{
	u32 flags = CSR_CRMD_IE;
	register u32 mask asm("t0") = CSR_CRMD_IE;

	__asm__ __volatile__(
		"csrxchg %[val], %[mask], %[reg]\n\t"
		: [val] "+r" (flags)
		: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
		: "memory");
}

static inline void arch_local_irq_disable(void)
{
	u32 flags = 0;
	register u32 mask asm("t0") = CSR_CRMD_IE;

	__asm__ __volatile__(
		"csrxchg %[val], %[mask], %[reg]\n\t"
		: [val] "+r" (flags)
		: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
		: "memory");
}

static inline unsigned long arch_local_irq_save(void)
{
	u32 flags = 0;
	register u32 mask asm("t0") = CSR_CRMD_IE;

	__asm__ __volatile__(
		"csrxchg %[val], %[mask], %[reg]\n\t"
		: [val] "+r" (flags)
		: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
		: "memory");
	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	register u32 mask asm("t0") = CSR_CRMD_IE;

	__asm__ __volatile__(
		"csrxchg %[val], %[mask], %[reg]\n\t"
		: [val] "+r" (flags)
		: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
		: "memory");
}

static inline unsigned long arch_local_save_flags(void)
{
	u32 flags;
	__asm__ __volatile__(
		"csrrd %[val], %[reg]\n\t"
		: [val] "=r" (flags)
		: [reg] "i" (LOONGARCH_CSR_CRMD)
		: "memory");
	return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & CSR_CRMD_IE);
}

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#endif /* #ifndef __ASSEMBLER__ */

#endif /* _ASM_IRQFLAGS_H */
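For context (this is not part of the header above): the arch_local_* helpers defined in this LoongArch irqflags header back the generic local_irq_save()/local_irq_restore() interface from include/linux/irqflags.h, which generic kernel code uses instead of calling the arch_* functions directly. A minimal, hypothetical usage sketch:

#include <linux/irqflags.h>

/* Hypothetical example: protect a short per-CPU critical section */
static void hypothetical_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* eventually calls arch_local_irq_save() */
	/* ... work that must not be interrupted on this CPU ... */
	local_irq_restore(flags);	/* eventually calls arch_local_irq_restore() */
}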