/* linux/arch/arm/include/asm/percpu.h */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2012 Calxeda, Inc.
*/
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
#include <asm/insn.h>
register unsigned long current_stack_pointer asm ("sp");
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
*/
#ifdef CONFIG_SMP
/*
 * Store this CPU's per-cpu offset in TPIDRPRW (CP15 c13, opcode2 4) so that
 * __my_cpu_offset() can read it back cheaply.  The "memory" clobber keeps the
 * compiler from reordering per-cpu accesses across the register update.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	extern unsigned int smp_on_up;

	/*
	 * On a CONFIG_CPU_V6 kernel running on a plain V6 core (smp_on_up
	 * not patched in), TPIDRPRW does not exist: only V6K and V7 have
	 * it, so there is nowhere to store the offset — bail out.
	 */
	if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
		return;

	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
/*
 * Return this CPU's per-cpu offset, normally by reading it back from
 * TPIDRPRW as programmed by set_my_cpu_offset().
 *
 * On CONFIG_CPU_V6 kernels that may boot on cores without TPIDRPRW, the
 * MRC at label 0 is runtime-patched (via the .alt.smp.init records) into a
 * branch to the out-of-line fallback at label 2, which loads the offset
 * from the __per_cpu_offset[] array instead.  The literal for that load is
 * emitted explicitly (label 3) rather than via an assembler literal pool,
 * because .ltorg inside a subsection confuses some toolchains; see commit
 * "ARM: 9176/1: avoid literal references in inline assembly".
 */
static __always_inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("0:	mrc p15, 0, %0, c13, c0, 4			\n\t"
#ifdef CONFIG_CPU_V6
	    "1:							\n\t"
	    "	.subsection 1					\n\t"
#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
    !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	    /* PC-relative group relocations available: load symbol directly */
	    "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) "	\n\t"
	    "	b	1b					\n\t"
#else
	    /* Fallback: explicit literal holding &__per_cpu_offset */
	    "2:	ldr	%0, 3f					\n\t"
	    "	ldr	%0, [%0]				\n\t"
	    "	b	1b					\n\t"
	    "3:	.long	__per_cpu_offset			\n\t"
#endif
	    "	.previous					\n\t"
	    /* Record the MRC site so SMP_ON_UP patching can redirect it */
	    "	.pushsection \".alt.smp.init\", \"a\"		\n\t"
	    "	.long	0b - .					\n\t"
	    "	b	. + (2b - 0b)				\n\t"
	    "	.popsection					\n\t"
#endif
	   : "=r" (off)
	   : "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
#else
#define set_my_cpu_offset(x) do {} while(0)
#endif /* CONFIG_SMP */
#include <asm-generic/percpu.h>
#endif /* _ASM_ARM_PERCPU_H_ */