Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-18 22:14:16 +00:00)

Move the contents of arch/arm64/lib/crypto/ into lib/crypto/arm64/.

The new code organization makes a lot more sense for how this code actually works and is developed. In particular, it makes it possible to build each algorithm as a single module, with better inlining and dead code elimination. For a more detailed explanation, see the patchset which did this for the CRC library code: https://lore.kernel.org/r/20250607200454.73587-1-ebiggers@kernel.org/. Also see the patchset which did this for SHA-512: https://lore.kernel.org/linux-crypto/20250616014019.415791-1-ebiggers@kernel.org/

This is just a preparatory commit, which moves the files into their new location but keeps them building the same way as before. Later commits will make the actual improvements to the way the arch-optimized code is integrated for each algorithm.

Add a gitignore entry for the removed directory arch/arm64/lib/crypto/ so that people don't accidentally commit leftover generated files.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Sohil Mehta <sohil.mehta@intel.com>
Link: https://lore.kernel.org/r/20250619191908.134235-3-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
73 lines
2 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * OpenSSL/Cryptogams accelerated Poly1305 transform for arm64
 *
 * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <crypto/internal/poly1305.h>
#include <linux/cpufeature.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/unaligned.h>

/* Implemented in the OpenSSL/Cryptogams-generated assembly (scalar and NEON). */
asmlinkage void poly1305_block_init_arch(
	struct poly1305_block_state *state,
	const u8 raw_key[POLY1305_BLOCK_SIZE]);
EXPORT_SYMBOL_GPL(poly1305_block_init_arch);
asmlinkage void poly1305_blocks(struct poly1305_block_state *state,
				const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_blocks_neon(struct poly1305_block_state *state,
				     const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit_arch(const struct poly1305_state *state,
				   u8 digest[POLY1305_DIGEST_SIZE],
				   const u32 nonce[4]);
EXPORT_SYMBOL_GPL(poly1305_emit_arch);

/* Enabled at boot when the CPU advertises ASIMD (NEON) support. */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);

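/*
 * Process all complete 16-byte blocks in the input. When NEON is
 * available, the work is done in bounded chunks because
 * kernel_neon_begin() disables preemption; otherwise the scalar
 * assembly handles the whole input in one call.
 */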
void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
			  unsigned int len, u32 padbit)
{
	len = round_down(len, POLY1305_BLOCK_SIZE);
	if (static_branch_likely(&have_neon)) {
		do {
			unsigned int todo = min_t(unsigned int, len, SZ_4K);

			kernel_neon_begin();
			poly1305_blocks_neon(state, src, todo, padbit);
			kernel_neon_end();

			len -= todo;
			src += todo;
		} while (len);
	} else
		poly1305_blocks(state, src, len, padbit);
}
EXPORT_SYMBOL_GPL(poly1305_blocks_arch);

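/*
 * Hypothetical illustration (not part of the original file): a one-shot
 * digest over a whole number of 16-byte blocks, showing how the exported
 * routines above fit together. It assumes that struct poly1305_block_state
 * embeds its accumulator as the member 'h', as in the kernel's internal
 * Poly1305 header, and that <linux/string.h> provides memcpy(); the
 * function name is made up for this sketch.
 */
static void __maybe_unused poly1305_demo_digest(
	const u8 key[POLY1305_KEY_SIZE], const u8 *msg, unsigned int len,
	u8 digest[POLY1305_DIGEST_SIZE])
{
	struct poly1305_block_state state;
	u32 nonce[4];

	/* The first 16 key bytes hold the (clamped) multiplier 'r'. */
	poly1305_block_init_arch(&state, key);
	/* The last 16 key bytes hold the nonce 's', added during emit. */
	memcpy(nonce, key + POLY1305_BLOCK_SIZE, sizeof(nonce));

	/* padbit=1: every block is a full 16-byte block. */
	poly1305_blocks_arch(&state, msg, len, 1);
	poly1305_emit_arch(&state.h, digest, nonce);
}
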
bool poly1305_is_arch_optimized(void)
{
	/* We can always use at least the ARM64 scalar implementation. */
	return true;
}
EXPORT_SYMBOL(poly1305_is_arch_optimized);

static int __init neon_poly1305_mod_init(void)
{
	if (cpu_have_named_feature(ASIMD))
		static_branch_enable(&have_neon);
	return 0;
}
subsys_initcall(neon_poly1305_mod_init);

static void __exit neon_poly1305_mod_exit(void)
{
}
module_exit(neon_poly1305_mod_exit);

MODULE_DESCRIPTION("Poly1305 authenticator (ARM64 optimized)");
MODULE_LICENSE("GPL v2");
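For context, callers normally reach this code through the generic Poly1305 library interface declared in <crypto/poly1305.h> rather than through the exported hooks directly. A minimal sketch, assuming the poly1305_init()/poly1305_update()/poly1305_final() library API; the function name is illustrative only:

#include <crypto/poly1305.h>

/* Compute a Poly1305 tag over 'data' using a one-time 32-byte key. */
static void poly1305_tag_demo(const u8 key[POLY1305_KEY_SIZE],
			      const u8 *data, unsigned int len,
			      u8 tag[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx ctx;

	poly1305_init(&ctx, key);         /* split the key into r and s */
	poly1305_update(&ctx, data, len); /* buffers any partial block  */
	poly1305_final(&ctx, tag);        /* pad, add s, emit 16 bytes  */
}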