
The current implementation is underperforming and, in addition, it triggers misaligned access traps on platforms that do not handle misaligned accesses in hardware. Use the existing assembly routines to solve both problems at once.

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20250602193918.868962-2-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
50 lines · 1.3 KiB · C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 SiFive
 * Author: Andy Chiu <andy.chiu@sifive.com>
 */
#include <linux/linkage.h>
#include <asm/asm.h>

#include <asm/vector.h>
#include <asm/simd.h>

#ifdef CONFIG_MMU
#include <asm/asm-prototypes.h>
#endif

#ifdef CONFIG_MMU
size_t riscv_v_usercopy_threshold = CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD;
int __asm_vector_usercopy(void *dst, void *src, size_t n);
int __asm_vector_usercopy_sum_enabled(void *dst, void *src, size_t n);
int fallback_scalar_usercopy(void *dst, void *src, size_t n);
int fallback_scalar_usercopy_sum_enabled(void *dst, void *src, size_t n);
asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n,
				     bool enable_sum)
{
	size_t remain, copied;

	/* skip has_vector() check because it has been done by the asm */
	if (!may_use_simd())
		goto fallback;

	kernel_vector_begin();
	remain = enable_sum ? __asm_vector_usercopy(dst, src, n) :
			      __asm_vector_usercopy_sum_enabled(dst, src, n);
	kernel_vector_end();

	if (remain) {
		copied = n - remain;
		dst += copied;
		src += copied;
		n = remain;
		goto fallback;
	}

	return remain;

fallback:
	return enable_sum ? fallback_scalar_usercopy(dst, src, n) :
			    fallback_scalar_usercopy_sum_enabled(dst, src, n);
}
#endif
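As the commit message notes, the size threshold and enter_vector_usercopy() entry point defined above are consumed by the assembly user-copy routines rather than by C callers directly: the asm checks whether vector is available and whether the copy is large enough before tail-calling into this file. The following is only a minimal, illustrative sketch of that routing logic, assuming the assembly behaves roughly as described; usercopy_dispatch_sketch() is a hypothetical name, while has_vector(), riscv_v_usercopy_threshold and the fallback routines are the identifiers used above.

/*
 * Illustrative sketch only, not kernel code: the real size check and
 * vector-availability test live in the assembly user-copy entry points.
 */
static int usercopy_dispatch_sketch(void *dst, void *src, size_t n,
				    bool enable_sum)
{
	/* Sufficiently large copies try the vector path first. */
	if (has_vector() && n >= riscv_v_usercopy_threshold)
		return enter_vector_usercopy(dst, src, n, enable_sum);

	/* Small copies, or no vector unit, go straight to the scalar asm. */
	return enable_sum ? fallback_scalar_usercopy(dst, src, n) :
			    fallback_scalar_usercopy_sum_enabled(dst, src, n);
}

Under that reading, a partial vector copy (a fault midway through) returns the number of remaining bytes, and enter_vector_usercopy() advances the pointers and hands the tail to the scalar routine, which keeps the misaligned-access handling entirely inside the existing assembly helpers.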