mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Similarly to what was done with the memcpy() routines, make copy_to_user(), copy_from_user() and clear_user() also use the Armv8.8 FEAT_MOPS instructions.

Both MOPS implementation options (A and B) are supported, including asymmetric systems. The exception fixup code fixes up the registers according to the option used.

In case of a fault the routines return precisely how much was not copied (as required by the comment in include/linux/uaccess.h), as unprivileged versions of CPY/SET are guaranteed not to have written past the addresses reported in the GPRs.

The MOPS instructions could possibly be inlined into callers (and patched to branch to the generic implementation if not detected, similarly to what x86 does), but as a first step this patch just uses them in the out-of-line routines.

Signed-off-by: Kristina Martšenko <kristina.martsenko@arm.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20250228170006.390100-4-kristina.martsenko@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
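The "bytes not copied/cleared" return convention mentioned above is what the precise exception fixups below exist to preserve. As a minimal sketch of how callers typically consume that value: copy_from_user() and clear_user() are the real include/linux/uaccess.h APIs, while the demo_req structure and demo_* function names are hypothetical, for illustration only.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl-style payload, used only for illustration. */
struct demo_req {
	u32 flags;
	u64 addr;
};

static int demo_copy_in(struct demo_req *dst, const void __user *src)
{
	/*
	 * copy_from_user() returns the number of bytes it could NOT copy;
	 * zero means the whole structure was transferred.
	 */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int demo_clear_out(void __user *dst, size_t len)
{
	/*
	 * clear_user() follows the same convention: a non-zero result means
	 * some tail of the range was left untouched.
	 */
	return clear_user(dst, len) ? -EFAULT : 0;
}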
72 lines
1.4 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Arm Ltd.
 */

#include <linux/linkage.h>
#include <asm/asm-uaccess.h>

	.text

/* Prototype: int __arch_clear_user(void *addr, size_t sz)
 * Purpose  : clear some user memory
 * Params   : addr - user memory address to clear
 *          : sz   - number of bytes to clear
 * Returns  : number of bytes NOT cleared
 *
 * Alignment fixed up by hardware.
 */
SYM_FUNC_START(__arch_clear_user)
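	// x2 = one past the end of the buffer; the overlapping tail stores and
	// the fault fixups below both derive the return value from it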
	add	x2, x0, x1

#ifdef CONFIG_AS_HAS_MOPS
	.arch_extension mops
alternative_if_not ARM64_HAS_MOPS
	b	.Lno_mops
alternative_else_nop_endif
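	// FEAT_MOPS path: the unprivileged prologue/main/epilogue SET* sequence
	// zero-fills x1 bytes starting at x0; faults are redirected to the
	// fixup labels 6: and 9: below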
USER(9f, setpt	[x0]!, x1!, xzr)
USER(6f, setmt	[x0]!, x1!, xzr)
USER(6f, setet	[x0]!, x1!, xzr)
	mov	x0, #0
	ret
.Lno_mops:
#endif
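	// Non-MOPS fallback: buffers of 8 bytes or more are cleared 8 bytes at
	// a time, with a final overlapping store at [x2, #-8]; shorter buffers
	// are handled at 2: below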
	subs	x1, x1, #8
	b.mi	2f

1:	.p2align 4
USER(9f, sttr	xzr, [x0])
	add	x0, x0, #8
	subs	x1, x1, #8
	b.hi	1b
USER(9f, sttr	xzr, [x2, #-8])
	mov	x0, #0
	ret
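	// Fewer than 8 bytes: bit 2 of the size selects two overlapping word
	// stores (4-7 bytes); bits 1 and 0 then cover the remaining 0-3 bytes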
2:	tbz	x1, #2, 3f
USER(9f, sttr	wzr, [x0])
USER(8f, sttr	wzr, [x2, #-4])
	mov	x0, #0
	ret

3:	tbz	x1, #1, 4f
USER(9f, sttrh	wzr, [x0])
4:	tbz	x1, #0, 5f
USER(7f, sttrb	wzr, [x2, #-1])
5:	mov	x0, #0
	ret

// Exception fixups
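	// 6: fault in setmt/setet. Carry clear means the registers are in the
	// Option A format (x0 = end address, x1 = negative bytes remaining), so
	// adding them yields the first address not yet zeroed; carry set means
	// the Option B format, where x0 already points there. A fault in setpt
	// goes straight to 9:, which returns the number of bytes not cleared.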
6:	b.cs	9f
	// Registers are in Option A format
	add	x0, x0, x1
	b	9f
7:	sub	x0, x2, #5	// Adjust for faulting on the final byte...
8:	add	x0, x0, #4	// ...or the second word of the 4-7 byte case
9:	sub	x0, x2, x0
	ret
SYM_FUNC_END(__arch_clear_user)
EXPORT_SYMBOL(__arch_clear_user)