mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00

Move the contents of arch/arm64/lib/crypto/ into lib/crypto/arm64/. The new code organization makes a lot more sense for how this code actually works and is developed. In particular, it makes it possible to build each algorithm as a single module, with better inlining and dead code elimination. For a more detailed explanation, see the patchset which did this for the CRC library code: https://lore.kernel.org/r/20250607200454.73587-1-ebiggers@kernel.org/. Also see the patchset which did this for SHA-512: https://lore.kernel.org/linux-crypto/20250616014019.415791-1-ebiggers@kernel.org/ This is just a preparatory commit, which does the move to get the files into their new location but keeps them building the same way as before. Later commits will make the actual improvements to the way the arch-optimized code is integrated for each algorithm. Add a gitignore entry for the removed directory arch/arm64/lib/crypto/ so that people don't accidentally commit leftover generated files. Acked-by: Ard Biesheuvel <ardb@kernel.org> Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com> Reviewed-by: Sohil Mehta <sohil.mehta@intel.com> Link: https://lore.kernel.org/r/20250619191908.134235-3-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
119 lines
3.2 KiB
C
/*
 * ChaCha and HChaCha functions (ARM64 optimized)
 *
 * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on:
 * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
|
|
|
|
#include <crypto/chacha.h>
|
|
#include <crypto/internal/simd.h>
|
|
#include <linux/jump_label.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
|
|
#include <asm/hwcap.h>
|
|
#include <asm/neon.h>
|
|
#include <asm/simd.h>
|
|
|
|
/*
 * NEON assembly routines (defined in the accompanying .S file).
 * All of them must be called with the NEON unit enabled, i.e. between
 * kernel_neon_begin() and kernel_neon_end().
 */

/* XOR one ChaCha block of 'src' into 'dst' using 'state'. */
asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
				      u8 *dst, const u8 *src, int nrounds);

/*
 * XOR 'bytes' bytes of 'src' into 'dst'; used by chacha_doneon() for
 * spans larger than one block (up to CHACHA_BLOCK_SIZE * 5 per call).
 */
asmlinkage void chacha_4block_xor_neon(const struct chacha_state *state,
				       u8 *dst, const u8 *src,
				       int nrounds, int bytes);

/* HChaCha: derive HCHACHA_OUT_WORDS words of output from 'state'. */
asmlinkage void hchacha_block_neon(const struct chacha_state *state,
				   u32 out[HCHACHA_OUT_WORDS], int nrounds);

/* Set once at init when the CPU has Advanced SIMD (NEON); never cleared. */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
|
|
|
|
/*
 * Process 'bytes' bytes with the NEON routines.  Caller must hold the
 * NEON unit (kernel_neon_begin()/kernel_neon_end()) around this call.
 * Advances the block counter in state->x[12] by the number of blocks
 * consumed.
 */
static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src,
			  int bytes, int nrounds)
{
	u8 buf[CHACHA_BLOCK_SIZE];

	/* Fast path: hand up to five blocks at a time to the assembly. */
	while (bytes > CHACHA_BLOCK_SIZE) {
		int len = min(bytes, CHACHA_BLOCK_SIZE * 5);

		chacha_4block_xor_neon(state, dst, src, nrounds, len);
		state->x[12] += DIV_ROUND_UP(len, CHACHA_BLOCK_SIZE);
		src += len;
		dst += len;
		bytes -= len;
	}

	if (bytes <= 0)
		return;

	/*
	 * Tail of at most one block: bounce through a stack buffer so the
	 * single-block routine can operate on a full block's worth of
	 * memory regardless of the actual length.
	 */
	memcpy(buf, src, bytes);
	chacha_block_xor_neon(state, buf, buf, nrounds);
	memcpy(dst, buf, bytes);
	state->x[12] += 1;
}
|
|
|
|
void hchacha_block_arch(const struct chacha_state *state,
|
|
u32 out[HCHACHA_OUT_WORDS], int nrounds)
|
|
{
|
|
if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
|
|
hchacha_block_generic(state, out, nrounds);
|
|
} else {
|
|
kernel_neon_begin();
|
|
hchacha_block_neon(state, out, nrounds);
|
|
kernel_neon_end();
|
|
}
|
|
}
|
|
EXPORT_SYMBOL(hchacha_block_arch);
|
|
|
|
/*
 * Encrypt or decrypt 'bytes' bytes from 'src' into 'dst' (XOR keystream).
 * Uses NEON when available and worthwhile; otherwise defers to the
 * generic implementation.
 */
void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
		       unsigned int bytes, int nrounds)
{
	/*
	 * Fall back to generic code when NEON is absent, when SIMD may not
	 * currently be used, or when the request fits in a single block and
	 * the NEON setup cost would not pay off.
	 */
	if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
	    !crypto_simd_usable())
		return chacha_crypt_generic(state, dst, src, bytes, nrounds);

	/* bytes > CHACHA_BLOCK_SIZE >= 1 here, so the loop runs at least once. */
	while (bytes) {
		/* Cap each NEON section at 4 KiB to bound time with preemption off. */
		unsigned int chunk = min_t(unsigned int, bytes, SZ_4K);

		kernel_neon_begin();
		chacha_doneon(state, dst, src, chunk, nrounds);
		kernel_neon_end();

		src += chunk;
		dst += chunk;
		bytes -= chunk;
	}
}
EXPORT_SYMBOL(chacha_crypt_arch);
|
|
|
|
/* Report whether the NEON-accelerated implementation was enabled at init. */
bool chacha_is_arch_optimized(void)
{
	return static_key_enabled(&have_neon);
}
EXPORT_SYMBOL(chacha_is_arch_optimized);
|
|
|
|
/* Enable the NEON fast path if the CPU advertises Advanced SIMD. */
static int __init chacha_simd_mod_init(void)
{
	if (!cpu_have_named_feature(ASIMD))
		return 0;

	static_branch_enable(&have_neon);
	return 0;
}
subsys_initcall(chacha_simd_mod_init);
|
|
|
|
/* Nothing to tear down on unload; the static key needs no cleanup. */
static void __exit chacha_simd_mod_exit(void)
{
}
module_exit(chacha_simd_mod_exit);
|
|
|
|
MODULE_DESCRIPTION("ChaCha and HChaCha functions (ARM64 optimized)");
|
|
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
|
|
MODULE_LICENSE("GPL v2");
|