2019-05-28 10:10:04 -07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2012-11-29 20:39:54 +01:00
|
|
|
/*
|
|
|
|
* Copyright 2012 Calxeda, Inc.
|
|
|
|
*/
|
|
|
|
#ifndef _ASM_ARM_PERCPU_H_
|
|
|
|
#define _ASM_ARM_PERCPU_H_
|
|
|
|
|
2021-11-25 10:26:44 +01:00
|
|
|
#include <asm/insn.h>
|
|
|
|
|
2020-06-22 17:21:58 +02:00
|
|
|
register unsigned long current_stack_pointer asm ("sp");
|
|
|
|
|
2012-11-29 20:39:54 +01:00
|
|
|
/*
|
|
|
|
* Same as asm-generic/percpu.h, except that we store the per cpu offset
|
|
|
|
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
|
|
|
|
*/
|
2021-11-25 10:26:44 +01:00
|
|
|
#ifdef CONFIG_SMP
|
2012-11-29 20:39:54 +01:00
|
|
|
/*
 * Record this CPU's per-cpu offset in the TPIDRPRW CP15 register so that
 * __my_cpu_offset() can retrieve it without a memory load.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	extern unsigned int smp_on_up;

	/*
	 * TPIDRPRW only exists on V6K and V7 (see header comment above).
	 * On a kernel built with CONFIG_CPU_V6, smp_on_up distinguishes at
	 * runtime whether we booted on an SMP-capable CPU; on plain V6
	 * (!smp_on_up) skip the write — presumably the mcr would fault
	 * there (NOTE(review): confirm against the SMP_ON_UP machinery).
	 */
	if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
		return;

	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
|
|
|
|
|
2022-01-24 23:32:51 +01:00
|
|
|
/*
 * Return this CPU's per-cpu offset, normally by reading it back out of
 * TPIDRPRW (where set_my_cpu_offset() stored it).
 *
 * On CONFIG_CPU_V6 builds the mrc is patched at boot via the
 * ".alt.smp.init" records below: when running on a non-SMP-capable CPU
 * the instruction at label 0 is replaced with a branch to the fallback
 * at label 2, which loads the offset from the __per_cpu_offset table in
 * memory instead.
 */
static __always_inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
#ifdef CONFIG_CPU_V6
	/* Fallback path for non-SMP CPUs, kept out of line in subsection 1. */
	"1: \n\t"
	" .subsection 1 \n\t"
#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
!(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	/* Load __per_cpu_offset via PC-relative group relocations. */
	"2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
	" b 1b \n\t"
#else
	/*
	 * No group relocations available (or a module built with PLTs):
	 * emit the literal explicitly at label 3 and load through it, so
	 * we never depend on the assembler placing a literal pool.
	 */
	"2: ldr %0, 3f \n\t"
	" ldr %0, [%0] \n\t"
	" b 1b \n\t"
	"3: .long __per_cpu_offset \n\t"
#endif
	" .previous \n\t"
	/*
	 * Record a fixup: the offset of instruction 0 and a branch to the
	 * fallback at 2, for boot-time patching when running UP.
	 */
	" .pushsection \".alt.smp.init\", \"a\" \n\t"
	" .long 0b - . \n\t"
	" b . + (2b - 0b) \n\t"
	" .popsection \n\t"
#endif
	: "=r" (off)
	/* Fake dependency on the stack — hazards against barrier(). */
	: "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
|
|
|
|
#define __my_cpu_offset __my_cpu_offset()
|
|
|
|
#else
|
|
|
|
#define set_my_cpu_offset(x) do {} while(0)
|
|
|
|
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
|
|
|
#include <asm-generic/percpu.h>
|
|
|
|
|
|
|
|
#endif /* _ASM_ARM_PERCPU_H_ */
|