mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Now that the user space TLS register is assigned on every return to user space, we can use it to keep the 'current' pointer while running in the kernel. This removes the need to access it via thread_info, which is located at the base of the stack, but will be moved out of there in a subsequent patch.

Use the __builtin_thread_pointer() helper when available - this will help GCC understand that reloading the value within the same function is not necessary, even when using the per-task stack protector (which also generates accesses via the TLS register). For example, the generated code below loads TPIDRURO only once, and uses it to access both the stack canary and the preempt_count fields.

<do_one_initcall>:
       e92d 41f0       stmdb   sp!, {r4, r5, r6, r7, r8, lr}
       ee1d 4f70       mrc     15, 0, r4, cr13, cr0, {3}
       4606            mov     r6, r0
       b094            sub     sp, #80         ; 0x50
       f8d4 34e8       ldr.w   r3, [r4, #1256] ; 0x4e8   <- stack canary
       9313            str     r3, [sp, #76]   ; 0x4c
       f8d4 8004       ldr.w   r8, [r4, #4]              <- preempt count

Co-developed-by: Keith Packard <keithpac@amazon.com>
Signed-off-by: Keith Packard <keithpac@amazon.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
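For reference, below is a minimal sketch of the consumer side, i.e. how 'current' can be read back out of TPIDRURO from C. It is an illustrative approximation, not the exact kernel code: the helper layout, the __has_builtin fallback and the attribute choices are assumptions, and the real definition lives elsewhere in the arch headers.

/*
 * Illustrative sketch only: read the 'current' task pointer that the kernel
 * keeps in TPIDRURO while running in kernel mode.
 */
struct task_struct;

#ifndef __has_builtin
#define __has_builtin(x) 0	/* fallback for compilers without __has_builtin */
#endif

static inline struct task_struct *get_current(void)
{
	struct task_struct *cur;

#if __has_builtin(__builtin_thread_pointer)
	/* Lets GCC see that repeated reads within one function can be CSEd */
	cur = __builtin_thread_pointer();
#else
	/* TPIDRURO is CP15 c13, c0, opc2 3 - the same mrc as in the listing above */
	asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (cur));
#endif
	return cur;
}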
34 lines
1.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H

#include <linux/thread_info.h>

/*
 * For v7 SMP cores running a preemptible kernel we may be pre-empted
 * during a TLB maintenance operation, so execute an inner-shareable dsb
 * to ensure that the maintenance completes in case we migrate to another
 * CPU.
 */
#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi()	dsb(ish)
#else
#define __complete_pending_tlbi()
#endif

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	__complete_pending_tlbi();					\
	if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO))		\
		__this_cpu_write(__entry_task, next);			\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#endif /* __ASM_ARM_SWITCH_TO_H */
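The __this_cpu_write(__entry_task, next) in the macro above records the incoming task per CPU; per the commit message, the TLS register is assigned on every return to user space, so the kernel can put 'current' back into TPIDRURO when the CPU next enters the kernel, while user space only ever sees its own TLS value there. The actual register write happens in assembly; the snippet below is only a hedged C rendering of that privileged write, with a made-up helper name.

/*
 * Hypothetical helper (name and placement are illustrative): write a task
 * pointer into TPIDRURO. The register is read-only from user space, so only
 * kernel mode can do this; the kernel itself performs the equivalent mcr in
 * its entry/exit and context-switch assembly.
 */
struct task_struct;

static inline void set_current_tpidruro(struct task_struct *tsk)
{
	asm volatile("mcr p15, 0, %0, c13, c0, 3" : : "r" (tsk));
}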