/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

static inline int __access_ok(const void __user *ptr, unsigned long size);

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline int access_ok(const void __user *addr, unsigned long size)
{
	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	return likely(__access_ok(addr, size));
}
#define access_ok access_ok

#include <asm-generic/access_ok.h>
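
/*
 * Illustrative usage sketch (not part of this header; `ubuf` and `len` are
 * hypothetical parameters): a read(2)-style handler would validate the whole
 * user range once and then rely on the "__xxx" accessors. Because the check
 * is done in 65-bit terms, `addr + size` cannot silently wrap past
 * TASK_SIZE_MAX.
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 */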

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void uaccess_disable_privileged(void)
{
	mte_disable_tco();

	if (uaccess_ttbr0_disable())
		return;

	__uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
	mte_enable_tco();

	if (uaccess_ttbr0_enable())
		return;

	__uaccess_disable_hw_pan();
}
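
/*
 * Illustrative pairing sketch (assumed usage, not a definition from this
 * file): callers performing privileged accesses to user memory, such as the
 * futex atomic helpers, bracket the access like this and must not schedule
 * in between:
 *
 *	uaccess_enable_privileged();
 *	... exclusive load/store sequence on a user address ...
 *	uaccess_disable_privileged();
 */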

/*
 * Sanitize a uaccess pointer such that it cannot reach any kernel address.
 *
 * Clearing bit 55 ensures the pointer cannot address any portion of the TTBR1
 * address range (i.e. any kernel address), and either the pointer falls within
 * the TTBR0 address range or must cause a fault.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bic	%0, %1, %2\n"
	: "=r" (safe_ptr)
	: "r" (ptr),
	  "i" (BIT(55))
	);

	return safe_ptr;
}
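
/*
 * Worked example (illustrative values): bit 55 selects the translation
 * table, so clearing it can only yield a TTBR0 (user) address or an address
 * that faults:
 *
 *	__uaccess_mask_ptr((void __user *)0xffff000012345678)
 *		returns 0xff7f000012345678, which lies in neither the TTBR0
 *		nor the TTBR1 range and can only fault.
 */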

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_mem_asm(load, reg, x, addr, label, type)			\
	asm_goto_output(						\
	"1:	" load "	" reg "0, [%1]\n"			\
	_ASM_EXTABLE_##type##ACCESS(1b, %l2)				\
	: "=r" (x)							\
	: "r" (addr) : : label)
#else
#define __get_mem_asm(load, reg, x, addr, label, type) do {		\
	int __gma_err = 0;						\
	asm volatile(							\
	"1:	" load "	" reg "1, [%2]\n"			\
	"2:\n"								\
	_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1)		\
	: "+r" (__gma_err), "=r" (x)					\
	: "r" (addr));							\
	if (__gma_err) goto label; } while (0)
#endif

#define __raw_get_mem(ldr, x, ptr, label, type)				\
do {									\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type);	\
		break;							\
	case 2:								\
		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type);	\
		break;							\
	case 4:								\
		__get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type);	\
		break;							\
	case 8:								\
		__get_mem_asm(ldr, "%x", __gu_val, (ptr), label, type);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
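
/*
 * Expansion sketch (illustrative): for a 4-byte user load,
 * __raw_get_mem("ldtr", x, ptr, label, U) selects the "case 4" arm and
 * emits roughly:
 *
 *	"1:	ldtr	%w0, [%1]"	// unprivileged 32-bit load
 *
 * together with an extable entry that branches to `label` on a fault.
 */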

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_get_user(x, ptr, label)					\
do {									\
	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
	__typeof__(x) __rgu_val;					\
	__chk_user_ptr(ptr);						\
	do {								\
		__label__ __rgu_failed;					\
		uaccess_ttbr0_enable();					\
		__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U);	\
		uaccess_ttbr0_disable();				\
		(x) = __rgu_val;					\
		break;							\
	__rgu_failed:							\
		uaccess_ttbr0_disable();				\
		goto label;						\
	} while (0);							\
} while (0)
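
/*
 * Hazard sketch (hypothetical): had `ptr` been expanded inside the critical
 * section, an argument such as
 *
 *	__raw_get_user(x, ptr_expr_that_may_sleep(), label)
 *
 * could call into the scheduler with the user page tables still live;
 * evaluating `ptr` into __rgu_ptr beforehand avoids this.
 */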

#define __get_user_error(x, ptr, err)					\
do {									\
	__label__ __gu_failed;						\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, __gu_failed);			\
	} else {							\
	__gu_failed:							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
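
/*
 * Typical usage (illustrative; `ubuf` is a hypothetical user pointer):
 * get_user() returns 0 on success and -EFAULT on failure, and the
 * destination is zeroed on the failure path.
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)ubuf))
 *		return -EFAULT;
 */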

/*
 * We must not call into the scheduler between __mte_enable_tco_async() and
 * __mte_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __gkn_dst = (dst);				\
	__typeof__(src) __gkn_src = (src);				\
	do {								\
		__label__ __gkn_label;					\
									\
		__mte_enable_tco_async();				\
		__raw_get_mem("ldr", *((type *)(__gkn_dst)),		\
		    (__force type *)(__gkn_src), __gkn_label, K);	\
		__mte_disable_tco_async();				\
		break;							\
	__gkn_label:							\
		__mte_disable_tco_async();				\
		goto err_label;						\
	} while (0);							\
} while (0)
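
/*
 * Usage sketch (illustrative): callers normally reach this via
 * copy_from_kernel_nofault() rather than invoking it directly. `addr` is a
 * hypothetical, possibly-unmapped kernel pointer.
 *
 *	long val;
 *
 *	__get_kernel_nofault(&val, addr, long, Efault);
 *	return val;
 * Efault:
 *	return -EFAULT;
 */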

#define __put_mem_asm(store, reg, x, addr, label, type)			\
	asm goto(							\
	"1:	" store "	" reg "0, [%1]\n"			\
	"2:\n"								\
	_ASM_EXTABLE_##type##ACCESS(1b, %l2)				\
	: : "rZ" (x), "r" (addr) : : label)
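
/*
 * Note on the "rZ" constraint (illustrative): it allows a constant zero to
 * be encoded as wzr/xzr, so e.g. put_user(0, ptr) can store from the zero
 * register without consuming a scratch register.
 */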

#define __raw_put_mem(str, x, ptr, label, type)				\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_mem_asm(str "b", "%w", __pu_val, (ptr), label, type);	\
		break;							\
	case 2:								\
		__put_mem_asm(str "h", "%w", __pu_val, (ptr), label, type);	\
		break;							\
	case 4:								\
		__put_mem_asm(str, "%w", __pu_val, (ptr), label, type);	\
		break;							\
	case 8:								\
		__put_mem_asm(str, "%x", __pu_val, (ptr), label, type);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_put_user(x, ptr, label)					\
do {									\
	__label__ __rpu_failed;						\
	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
	__typeof__(*(ptr)) __rpu_val = (x);				\
	__chk_user_ptr(__rpu_ptr);					\
									\
	do {								\
		uaccess_ttbr0_enable();					\
		__raw_put_mem("sttr", __rpu_val, __rpu_ptr, __rpu_failed, U);	\
		uaccess_ttbr0_disable();				\
		break;							\
	__rpu_failed:							\
		uaccess_ttbr0_disable();				\
		goto label;						\
	} while (0);							\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__label__ __pu_failed;						\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
Remove 'type' argument from access_ok() function
Nobody has actually used the type (VERIFY_READ vs VERIFY_WRITE) argument
of the user address range verification function since we got rid of the
old racy i386-only code to walk page tables by hand.
It existed because the original 80386 would not honor the write protect
bit when in kernel mode, so you had to do COW by hand before doing any
user access. But we haven't supported that in a long time, and these
days the 'type' argument is a purely historical artifact.
A discussion about extending 'user_access_begin()' to do the range
checking resulted this patch, because there is no way we're going to
move the old VERIFY_xyz interface to that model. And it's best done at
the end of the merge window when I've done most of my merges, so let's
just get this done once and for all.
This patch was mostly done with a sed-script, with manual fix-ups for
the cases that weren't of the trivial 'access_ok(VERIFY_xyz' form.
There were a couple of notable cases:
- csky still had the old "verify_area()" name as an alias.
- the iter_iov code had magical hardcoded knowledge of the actual
values of VERIFY_{READ,WRITE} (not that they mattered, since nothing
really used it)
- microblaze used the type argument for a debug printout
but other than those oddities this should be a total no-op patch.
I tried to fix up all architectures, did fairly extensive grepping for
access_ok() uses, and the changes are trivial, but I may have missed
something. Any missed conversion should be trivially fixable, though.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2019-01-03 18:57:57 -08:00
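Concretely, the mechanical conversion the commit describes looks like
this on a representative (hypothetical) caller:

	/* before */
	if (!access_ok(VERIFY_WRITE, uaddr, len))
		return -EFAULT;

	/* after */
	if (!access_ok(uaddr, len))
		return -EFAULT;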
|
|
|
if (access_ok(__p, sizeof(*__p))) { \
|
2018-02-05 15:34:22 +00:00
|
|
|
__p = uaccess_mask_ptr(__p); \
|
2024-06-01 15:04:53 -07:00
|
|
|
__raw_put_user((x), __p, __pu_failed); \
|
2018-02-05 15:34:22 +00:00
|
|
|
} else { \
|
2024-06-01 15:04:53 -07:00
|
|
|
__pu_failed: \
|
2018-02-05 15:34:22 +00:00
|
|
|
(err) = -EFAULT; \
|
|
|
|
} \
|
2019-01-15 13:58:26 +00:00
|
|
|
} while (0)
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2018-02-05 15:34:22 +00:00
|
|
|
#define __put_user(x, ptr) \
|
2012-03-05 11:49:32 +00:00
|
|
|
({ \
|
2018-02-05 15:34:22 +00:00
|
|
|
int __pu_err = 0; \
|
2019-01-15 13:58:26 +00:00
|
|
|
__put_user_error((x), (ptr), __pu_err); \
|
2018-02-05 15:34:22 +00:00
|
|
|
__pu_err; \
|
2012-03-05 11:49:32 +00:00
|
|
|
})
|
|
|
|
|
2018-02-05 15:34:22 +00:00
|
|
|
#define put_user __put_user
|
|
|
|
|
2021-11-22 12:58:20 +00:00
|
|
|
/*
|
2023-03-11 00:43:31 +01:00
|
|
|
* We must not call into the scheduler between __mte_enable_tco_async() and
|
|
|
|
* __mte_disable_tco_async(). As `dst` and `src` may contain blocking
|
2021-11-22 12:58:20 +00:00
|
|
|
* functions, we must evaluate these outside of the critical section.
|
|
|
|
*/
|
2020-12-02 13:15:53 +00:00
|
|
|
#define __put_kernel_nofault(dst, src, type, err_label) \
|
|
|
|
do { \
|
2021-11-22 12:58:20 +00:00
|
|
|
__typeof__(dst) __pkn_dst = (dst); \
|
|
|
|
__typeof__(src) __pkn_src = (src); \
|
|
|
|
\
|
2024-06-01 15:04:53 -07:00
|
|
|
do { \
|
|
|
|
__label__ __pkn_err; \
|
|
|
|
__mte_enable_tco_async(); \
|
|
|
|
__raw_put_mem("str", *((type *)(__pkn_src)), \
|
|
|
|
(__force type *)(__pkn_dst), __pkn_err, K); \
|
|
|
|
__mte_disable_tco_async(); \
|
|
|
|
break; \
|
|
|
|
__pkn_err: \
|
|
|
|
__mte_disable_tco_async(); \
|
2020-12-02 13:15:53 +00:00
|
|
|
goto err_label; \
|
2024-06-01 15:04:53 -07:00
|
|
|
} while (0); \
|
2020-12-02 13:15:53 +00:00
|
|
|
} while (0)
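As a usage sketch (example_poke_kernel() is hypothetical, not a kernel
function), a caller supplies an error label and the macro branches to
it on a fault:

	static long example_poke_kernel(unsigned long *dst, unsigned long val)
	{
		__put_kernel_nofault(dst, &val, unsigned long, Efault);
		return 0;
	Efault:
		return -EFAULT;
	}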
|
|
|
|
|
2016-06-08 14:40:56 -07:00
|
|
|
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
|
2018-02-05 15:34:23 +00:00
|
|
|
#define raw_copy_from_user(to, from, n) \
|
|
|
|
({ \
|
2019-11-20 12:07:40 -05:00
|
|
|
unsigned long __acfu_ret; \
|
2020-12-02 13:15:57 +00:00
|
|
|
uaccess_ttbr0_enable(); \
|
2019-11-20 12:07:40 -05:00
|
|
|
__acfu_ret = __arch_copy_from_user((to), \
|
|
|
|
__uaccess_mask_ptr(from), (n)); \
|
2020-12-02 13:15:57 +00:00
|
|
|
uaccess_ttbr0_disable(); \
|
2019-11-20 12:07:40 -05:00
|
|
|
__acfu_ret; \
|
2018-02-05 15:34:23 +00:00
|
|
|
})
|
|
|
|
|
2016-06-08 14:40:56 -07:00
|
|
|
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
|
2018-02-05 15:34:23 +00:00
|
|
|
#define raw_copy_to_user(to, from, n) \
|
|
|
|
({ \
|
2019-11-20 12:07:40 -05:00
|
|
|
unsigned long __actu_ret; \
|
2020-12-02 13:15:57 +00:00
|
|
|
uaccess_ttbr0_enable(); \
|
2019-11-20 12:07:40 -05:00
|
|
|
__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \
|
|
|
|
(from), (n)); \
|
2020-12-02 13:15:57 +00:00
|
|
|
uaccess_ttbr0_disable(); \
|
2019-11-20 12:07:40 -05:00
|
|
|
__actu_ret; \
|
2018-02-05 15:34:23 +00:00
|
|
|
})
|
|
|
|
|
2024-06-09 10:11:04 -07:00
|
|
|
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
|
|
|
|
{
|
|
|
|
	if (unlikely(!access_ok(ptr, len)))
|
|
|
|
return 0;
|
|
|
|
uaccess_ttbr0_enable();
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
#define user_access_begin(a, b)	user_access_begin(a, b)
|
|
|
|
#define user_access_end() uaccess_ttbr0_disable()
|
2024-06-01 15:04:53 -07:00
|
|
|
#define unsafe_put_user(x, ptr, label) \
|
|
|
|
__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
|
2024-06-09 10:11:04 -07:00
|
|
|
#define unsafe_get_user(x, ptr, label) \
|
|
|
|
__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* KCSAN uses these to save and restore ttbr state.
|
|
|
|
* We do not support KCSAN with ARM64_SW_TTBR0_PAN, so
|
|
|
|
* they are no-ops.
|
|
|
|
*/
|
|
|
|
static inline unsigned long user_access_save(void) { return 0; }
|
|
|
|
static inline void user_access_restore(unsigned long enabled) { }
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We want the unsafe accessors to always be inlined and use
|
|
|
|
* the error labels - thus the macro games.
|
|
|
|
*/
|
|
|
|
#define unsafe_copy_loop(dst, src, len, type, label) \
|
|
|
|
while (len >= sizeof(type)) { \
|
|
|
|
	unsafe_put_user(*(type *)(src), (type __user *)(dst), label);	\
|
|
|
|
dst += sizeof(type); \
|
|
|
|
src += sizeof(type); \
|
|
|
|
len -= sizeof(type); \
|
|
|
|
}
|
|
|
|
|
|
|
|
#define unsafe_copy_to_user(_dst,_src,_len,label) \
|
|
|
|
do { \
|
|
|
|
char __user *__ucu_dst = (_dst); \
|
|
|
|
const char *__ucu_src = (_src); \
|
|
|
|
size_t __ucu_len = (_len); \
|
|
|
|
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
|
|
|
|
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
|
|
|
|
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
|
|
|
|
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
|
|
|
|
} while (0)
|
|
|
|
|
2017-03-21 08:40:57 -04:00
|
|
|
#define INLINE_COPY_TO_USER
|
|
|
|
#define INLINE_COPY_FROM_USER
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2018-02-05 15:34:23 +00:00
|
|
|
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
|
|
|
|
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
|
2012-03-05 11:49:32 +00:00
|
|
|
{
|
2019-11-20 12:07:40 -05:00
|
|
|
if (access_ok(to, n)) {
|
2020-12-02 13:15:57 +00:00
|
|
|
uaccess_ttbr0_enable();
|
2018-02-05 15:34:23 +00:00
|
|
|
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
|
2020-12-02 13:15:57 +00:00
|
|
|
uaccess_ttbr0_disable();
|
2019-11-20 12:07:40 -05:00
|
|
|
}
|
2012-03-05 11:49:32 +00:00
|
|
|
return n;
|
|
|
|
}
|
2018-02-05 15:34:23 +00:00
|
|
|
#define clear_user __clear_user
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2013-11-06 17:20:22 +00:00
|
|
|
extern long strncpy_from_user(char *dest, const char __user *src, long count);
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2013-11-06 17:20:22 +00:00
|
|
|
extern __must_check long strnlen_user(const char __user *str, long n);
|
2012-03-05 11:49:32 +00:00
|
|
|
|
2017-07-25 11:55:43 +01:00
|
|
|
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
|
|
|
|
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
|
|
|
|
|
|
|
|
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
|
|
|
|
{
|
|
|
|
kasan_check_write(dst, size);
|
2018-02-05 15:34:23 +00:00
|
|
|
return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
|
2017-07-25 11:55:43 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2022-04-23 11:07:50 +01:00
|
|
|
#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return 0 on success, the number of bytes not probed otherwise.
|
|
|
|
*/
|
|
|
|
static inline size_t probe_subpage_writeable(const char __user *uaddr,
|
|
|
|
size_t size)
|
|
|
|
{
|
|
|
|
if (!system_supports_mte())
|
|
|
|
return 0;
|
|
|
|
return mte_probe_user_range(uaddr, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
|
|
|
|
|
2024-10-01 23:58:48 +01:00
|
|
|
#ifdef CONFIG_ARM64_GCS
|
|
|
|
|
|
|
|
static inline int gcssttr(unsigned long __user *addr, unsigned long val)
|
|
|
|
{
|
|
|
|
register unsigned long __user *_addr __asm__ ("x0") = addr;
|
|
|
|
register unsigned long _val __asm__ ("x1") = val;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
/* GCSSTTR x1, x0 */
|
|
|
|
asm volatile(
|
|
|
|
"1: .inst 0xd91f1c01\n"
|
|
|
|
"2: \n"
|
|
|
|
_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
|
|
|
|
: "+r" (err)
|
|
|
|
: "rZ" (_val), "r" (_addr)
|
|
|
|
: "memory");
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2024-10-01 23:58:49 +01:00
|
|
|
static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
|
|
|
|
int *err)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!access_ok((char __user *)addr, sizeof(u64))) {
|
|
|
|
*err = -EFAULT;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
uaccess_ttbr0_enable();
|
|
|
|
ret = gcssttr(addr, val);
|
|
|
|
if (ret != 0)
|
|
|
|
*err = ret;
|
|
|
|
uaccess_ttbr0_disable();
|
|
|
|
}
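A brief usage sketch (the caller and its gcspr variable are
hypothetical): errors accumulate into a caller-provided int, in the
same style as __put_user_error():

	int err = 0;

	put_user_gcs(val, (unsigned long __user *)gcspr, &err);
	if (err)
		return err;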
|
|
|
|
|
|
|
|
|
2024-10-01 23:58:48 +01:00
|
|
|
#endif /* CONFIG_ARM64_GCS */
|
|
|
|
|
2012-03-05 11:49:32 +00:00
|
|
|
#endif /* __ASM_UACCESS_H */
|