lib/crc: arm64: Migrate optimized CRC code into lib/crc/
Move the arm64-optimized CRC code from arch/arm64/lib/crc* into its new
location in lib/crc/arm64/, and wire it up in the new way. This new way of
organizing the CRC code eliminates the need to artificially split the code
for each CRC variant into separate arch and generic modules, enabling better
inlining and dead code elimination. For more details, see "lib/crc: Prepare
for arch-optimized code in subdirs of lib/crc/".

Reviewed-by: "Martin K. Petersen" <martin.petersen@oracle.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: "Jason A. Donenfeld" <Jason@zx2c4.com>
Link: https://lore.kernel.org/r/20250607200454.73587-5-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
This commit is contained in:
parent 530b304f00
commit 2b7531b2a2
8 changed files with 11 additions and 42 deletions
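To make "wire it up in the new way" concrete, here is a minimal sketch of the
consolidated layout: the generic lib/crc/ module picks up a per-architecture
header via the -I$(src)/$(SRCARCH) flag added in the Makefile hunks below, and
that header supplies static inline arch hooks. The generic-side code shown
here is an assumption for illustration; only crc_t10dif_arch() and
crc_t10dif_mod_init_arch() actually appear in this diff.

/*
 * Illustrative sketch only -- the generic side is assumed, not taken from
 * this commit.  CONFIG_CRC_T10DIF_ARCH controls whether the per-arch header
 * (lib/crc/<arch>/crc-t10dif.h) is on the include path.
 */
#include <linux/crc-t10dif.h>
#include <linux/module.h>

#ifdef CONFIG_CRC_T10DIF_ARCH
#include "crc-t10dif.h"	/* arm64 version defines crc_t10dif_arch() as static inline */
#endif

u16 crc_t10dif_update(u16 crc, const u8 *p, size_t len)
{
#ifdef CONFIG_CRC_T10DIF_ARCH
	return crc_t10dif_arch(crc, p, len);	/* inlined into the one generic module */
#else
	return crc_t10dif_generic(crc, p, len);
#endif
}
EXPORT_SYMBOL(crc_t10dif_update);

Because the arch code is inlined rather than living in a separate module, the
compiler can also drop any arch path that the configuration makes unreachable.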
arch/arm64/Kconfig
@@ -21,8 +21,6 @@ config ARM64
 	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
 	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_CC_PLATFORM
-	select ARCH_HAS_CRC32
-	select ARCH_HAS_CRC_T10DIF if KERNEL_MODE_NEON
 	select ARCH_HAS_CURRENT_STACK_POINTER
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE

arch/arm64/lib/Makefile
@@ -16,12 +16,6 @@ endif
 
 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
 
-obj-$(CONFIG_CRC32_ARCH) += crc32-arm64.o
-crc32-arm64-y := crc32.o crc32-core.o
-
-obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-arm64.o
-crc-t10dif-arm64-y := crc-t10dif.o crc-t10dif-core.o
-
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 
 obj-$(CONFIG_ARM64_MTE) += mte.o

lib/crc/Kconfig
@@ -51,6 +51,7 @@ config CRC_T10DIF_ARCH
 	bool
 	depends on CRC_T10DIF && CRC_OPTIMIZATIONS
 	default y if ARM && KERNEL_MODE_NEON
+	default y if ARM64 && KERNEL_MODE_NEON
 
 config CRC32
 	tristate
@@ -66,6 +67,7 @@ config CRC32_ARCH
 	bool
 	depends on CRC32 && CRC_OPTIMIZATIONS
 	default y if ARM && KERNEL_MODE_NEON
+	default y if ARM64
 
 config CRC64
 	tristate

lib/crc/Makefile
@@ -14,6 +14,7 @@ crc-t10dif-y := crc-t10dif-main.o
 ifeq ($(CONFIG_CRC_T10DIF_ARCH),y)
 CFLAGS_crc-t10dif-main.o += -I$(src)/$(SRCARCH)
 crc-t10dif-$(CONFIG_ARM) += arm/crc-t10dif-core.o
+crc-t10dif-$(CONFIG_ARM64) += arm64/crc-t10dif-core.o
 endif
 
 obj-$(CONFIG_CRC32) += crc32.o
@@ -21,6 +22,7 @@ crc32-y := crc32-main.o
 ifeq ($(CONFIG_CRC32_ARCH),y)
 CFLAGS_crc32-main.o += -I$(src)/$(SRCARCH)
 crc32-$(CONFIG_ARM) += arm/crc32-core.o
+crc32-$(CONFIG_ARM64) += arm64/crc32-core.o
 endif
 
 obj-$(CONFIG_CRC64) += crc64.o

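From a caller's point of view nothing changes: the Makefile lines above only
decide whether the arm64 assembly objects are linked into the single generic
crc32 / crc-t10dif modules. A hypothetical in-kernel user (not part of this
commit) keeps calling the same API:

#include <linux/crc32.h>
#include <linux/crc-t10dif.h>
#include <linux/printk.h>

/* Hypothetical caller, shown only as a usage example. */
static void crc_demo(const u8 *buf, size_t len)
{
	u32 c32 = crc32_le(~0U, buf, len);	/* CRC32 instructions used when the CPU has them */
	u16 c16 = crc_t10dif(buf, len);		/* PMULL path used when available */

	pr_info("crc32=%08x crc-t10dif=%04x\n", c32, c16);
}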
arch/arm64/lib/crc-t10dif.c → lib/crc/arm64/crc-t10dif.h
@@ -6,11 +6,6 @@
  */
 
 #include <linux/cpufeature.h>
-#include <linux/crc-t10dif.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
 
 #include <crypto/internal/simd.h>
 
@@ -26,7 +21,7 @@ asmlinkage void crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len,
 				    u8 out[16]);
 asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
 
-u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
+static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
 	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
 		if (static_branch_likely(&have_pmull)) {
@@ -50,24 +45,13 @@ u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 	}
 	return crc_t10dif_generic(crc, data, length);
 }
-EXPORT_SYMBOL(crc_t10dif_arch);
 
-static int __init crc_t10dif_arm64_init(void)
+#define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch
+static inline void crc_t10dif_mod_init_arch(void)
 {
 	if (cpu_have_named_feature(ASIMD)) {
 		static_branch_enable(&have_asimd);
 		if (cpu_have_named_feature(PMULL))
 			static_branch_enable(&have_pmull);
 	}
-	return 0;
 }
-subsys_initcall(crc_t10dif_arm64_init);
-
-static void __exit crc_t10dif_arm64_exit(void)
-{
-}
-module_exit(crc_t10dif_arm64_exit);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_DESCRIPTION("CRC-T10DIF using arm64 NEON and Crypto Extensions");
-MODULE_LICENSE("GPL v2");

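The #define crc_t10dif_mod_init_arch crc_t10dif_mod_init_arch line is the
idiom that replaces the deleted module_init/module_exit boilerplate: it lets
the generic code detect at preprocessing time whether the arch header supplies
an init hook. A sketch of the assumed generic-side pattern (not part of this
diff) follows:

/* Assumed generic-side pattern, shown only to explain the macro idiom. */
#ifndef crc_t10dif_mod_init_arch
static inline void crc_t10dif_mod_init_arch(void) {}	/* no arch hook: no-op */
#endif

static int __init crc_t10dif_mod_init(void)
{
	crc_t10dif_mod_init_arch();	/* arm64: flips the have_asimd/have_pmull static keys */
	return 0;
}
subsys_initcall(crc_t10dif_mod_init);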
arch/arm64/lib/crc32.c → lib/crc/arm64/crc32.h
@@ -1,9 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
-#include <linux/crc32.h>
-#include <linux/linkage.h>
-#include <linux/module.h>
-
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/neon.h>
@@ -22,7 +18,7 @@ asmlinkage u32 crc32_le_arm64_4way(u32 crc, unsigned char const *p, size_t len);
 asmlinkage u32 crc32c_le_arm64_4way(u32 crc, unsigned char const *p, size_t len);
 asmlinkage u32 crc32_be_arm64_4way(u32 crc, unsigned char const *p, size_t len);
 
-u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 {
 	if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
 		return crc32_le_base(crc, p, len);
@@ -41,9 +37,8 @@ u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 
 	return crc32_le_arm64(crc, p, len);
 }
-EXPORT_SYMBOL(crc32_le_arch);
 
-u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
+static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 {
 	if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
 		return crc32c_base(crc, p, len);
@@ -62,9 +57,8 @@ u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 
 	return crc32c_le_arm64(crc, p, len);
 }
-EXPORT_SYMBOL(crc32c_arch);
 
-u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
+static inline u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
 {
 	if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
 		return crc32_be_base(crc, p, len);
@@ -83,9 +77,8 @@ u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
 
 	return crc32_be_arm64(crc, p, len);
 }
-EXPORT_SYMBOL(crc32_be_arch);
 
-u32 crc32_optimizations(void)
+static inline u32 crc32_optimizations_arch(void)
 {
 	if (alternative_has_cap_likely(ARM64_HAS_CRC32))
 		return CRC32_LE_OPTIMIZATION |
@@ -93,7 +86,3 @@ u32 crc32_optimizations(void)
 		       CRC32C_OPTIMIZATION;
 	return 0;
 }
-EXPORT_SYMBOL(crc32_optimizations);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("arm64-optimized CRC32 functions");

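crc32_optimizations() is no longer exported from the arm64 code; instead the
static inline crc32_optimizations_arch() above reports which CRC32 engines the
CPU provides, presumably consumed by the generic lib/crc/ side along these
lines (illustrative sketch, not taken from this commit):

#include <linux/crc32.h>
#include <linux/module.h>

/* Illustrative wrapper; only the flag names and the arch helper are real. */
u32 crc32_optimizations(void)
{
#ifdef CONFIG_CRC32_ARCH
	return crc32_optimizations_arch();	/* e.g. CRC32_LE_OPTIMIZATION | CRC32C_OPTIMIZATION */
#else
	return 0;
#endif
}
EXPORT_SYMBOL(crc32_optimizations);

Callers can use the returned flags to decide whether a CRC-based data
integrity format is cheap on the running machine.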