2019-06-03 07:44:50 +02:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2012-03-05 11:49:28 +00:00
|
|
|
/*
|
|
|
|
* Based on arch/arm/include/asm/thread_info.h
|
|
|
|
*
|
|
|
|
* Copyright (C) 2002 Russell King.
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*/
|
|
|
|
#ifndef __ASM_THREAD_INFO_H
|
|
|
|
#define __ASM_THREAD_INFO_H
|
|
|
|
|
|
|
|
#include <linux/compiler.h>
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
|
|
|
struct task_struct;
|
|
|
|
|
2017-07-14 16:39:21 +01:00
|
|
|
#include <asm/memory.h>
|
2016-11-03 20:23:05 +00:00
|
|
|
#include <asm/stack_pointer.h>
|
2012-03-05 11:49:28 +00:00
|
|
|
#include <asm/types.h>
|
|
|
|
|
|
|
|
/*
 * low level task data that entry.S needs immediate access to.
 *
 * NOTE(review): field offsets here are consumed from assembly (via
 * asm-offsets), so member order and sizes must not change without
 * auditing the entry code — TODO confirm against entry.S users.
 */
struct thread_info {
	unsigned long		flags;		/* low level flags */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	u64			ttbr0;		/* saved TTBR0_EL1 */
#endif
	/*
	 * preempt_count overlays a {count, need_resched} pair so that a
	 * single 64-bit load can observe both halves at once; the
	 * big-endian branch keeps each u32 at the same byte offset it
	 * would occupy within the u64 on little-endian.
	 */
	union {
		u64		preempt_count;	/* 0 => preemptible, <0 => bug */
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			u32	need_resched;
			u32	count;
#else
			u32	count;
			u32	need_resched;
#endif
		} preempt;
	};
#ifdef CONFIG_SHADOW_CALL_STACK
	void			*scs_base;	/* base of the shadow call stack */
	void			*scs_sp;	/* current shadow call stack pointer */
#endif
	u32			cpu;		/* CPU this task is running on */
};
|
|
|
|
|
|
|
|
/*
 * Accessors for the context saved at the last cpu_switch_to() of a
 * (currently switched-out) task: program counter, stack pointer and
 * frame pointer from tsk->thread.cpu_context.
 */
#define thread_saved_pc(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.sp))
#define thread_saved_fp(tsk)	\
	((unsigned long)(tsk->thread.cpu_context.fp))
|
|
|
|
|
2017-08-20 13:20:48 +03:00
|
|
|
/*
 * Arch hook invoked by the core exec code; defining the macro with the
 * same name signals to <linux/thread_info.h> that this architecture
 * provides an implementation.
 */
void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
|
|
|
|
|
2012-03-05 11:49:28 +00:00
|
|
|
#endif
|
|
|
|
|
2019-07-31 15:35:20 +02:00
|
|
|
/*
 * thread information flags: bit numbers into thread_info.flags.
 * The numeric values are load-bearing (tested from asm and exposed via
 * the _TIF_* masks below) and must not be renumbered casually.
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_NEED_RESCHED_LAZY	2	/* Lazy rescheduling needed */
#define TIF_NOTIFY_RESUME	3	/* callback before returning to user */
#define TIF_FOREIGN_FPSTATE	4	/* CPU's FP state is not current's */
#define TIF_UPROBE		5	/* uprobe breakpoint or singlestep */
#define TIF_MTE_ASYNC_FAULT	6	/* MTE Asynchronous Tag Check Fault */
#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
#define TIF_SYSCALL_TRACE	8	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	9	/* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
#define TIF_SECCOMP		11	/* syscall secure computing */
#define TIF_SYSCALL_EMU		12	/* syscall emulation active */
#define TIF_PATCH_PENDING	13	/* pending live patching update */
/* bits 14-17 intentionally unused — NOTE(review): confirm before reuse */
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_FREEZE		19
#define TIF_RESTORE_SIGMASK	20
#define TIF_SINGLESTEP		21
#define TIF_32BIT		22	/* 32bit process */
#define TIF_SVE			23	/* Scalable Vector Extension in use */
#define TIF_SVE_VL_INHERIT	24	/* Inherit SVE vl_onexec across exec */
#define TIF_SSBD		25	/* Wants SSB mitigation */
#define TIF_TAGGED_ADDR		26	/* Allow tagged user addresses */
#define TIF_SME			27	/* SME in use */
#define TIF_SME_VL_INHERIT	28	/* Inherit SME vl_onexec across exec */
#define TIF_KERNEL_FPSTATE	29	/* Task is in a kernel mode FPSIMD section */
#define TIF_TSC_SIGSEGV		30	/* SIGSEGV on counter-timer access */
#define TIF_LAZY_MMU		31	/* Task in lazy mmu mode */
#define TIF_LAZY_MMU_PENDING	32	/* Ops pending for lazy mmu mode exit */
|
2012-03-05 11:49:28 +00:00
|
|
|
|
|
|
|
/*
 * Mask forms of the TIF_* bit numbers above, for direct testing against
 * thread_info.flags.
 */
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_32BIT		(1 << TIF_32BIT)
#define _TIF_SVE		(1 << TIF_SVE)
#define _TIF_MTE_ASYNC_FAULT	(1 << TIF_MTE_ASYNC_FAULT)
#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
#define _TIF_TSC_SIGSEGV	(1 << TIF_TSC_SIGSEGV)
|
2012-03-05 11:49:28 +00:00
|
|
|
|
2025-03-05 11:49:25 +01:00
|
|
|
/*
 * Work that must be handled before returning to userspace.
 */
#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
				 _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
				 _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING | \
				 _TIF_PATCH_PENDING)
|
2012-03-05 11:49:28 +00:00
|
|
|
|
2014-04-30 10:51:29 +01:00
|
|
|
/*
 * Flags that force entry/exit through the slow syscall tracing path.
 */
#define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
				 _TIF_SYSCALL_EMU)
|
2012-03-05 11:49:28 +00:00
|
|
|
|
2020-04-27 09:00:16 -07:00
|
|
|
/*
 * Shadow call stack initializer for the init task. scs_sp starts at
 * scs_base — presumably because the SCS grows upwards from its base;
 * NOTE(review): confirm against the scs implementation.
 */
#ifdef CONFIG_SHADOW_CALL_STACK
#define INIT_SCS							\
	.scs_base	= init_shadow_call_stack,			\
	.scs_sp		= init_shadow_call_stack,
#else
/* Expands to nothing so INIT_THREAD_INFO stays valid without SCS. */
#define INIT_SCS
#endif
|
|
|
|
|
2018-05-24 15:54:30 +01:00
|
|
|
/*
 * Static thread_info initializer for the init task.
 * _TIF_FOREIGN_FPSTATE marks the FP state as not-loaded so it is set up
 * before first use; preempt_count starts at INIT_PREEMPT_COUNT.
 */
#define INIT_THREAD_INFO(tsk)						\
{									\
	.flags		= _TIF_FOREIGN_FPSTATE,				\
	.preempt_count	= INIT_PREEMPT_COUNT,				\
	INIT_SCS							\
}
|
|
|
|
|
2012-03-05 11:49:28 +00:00
|
|
|
#endif /* __ASM_THREAD_INFO_H */
|