linux/lib/crypto/arm/sha512.h
Eric Biggers 24c91b62ac lib/crypto: arm/sha512: Migrate optimized SHA-512 code to library
Instead of exposing the arm-optimized SHA-512 code via arm-specific
crypto_shash algorithms, just implement the sha512_blocks() library
function.  This is much simpler, it makes the SHA-512 (and SHA-384)
library functions arm-optimized, and it fixes the longstanding issue
where the arm-optimized SHA-512 code was disabled by default.  SHA-512
still remains available through crypto_shash, but individual
architectures no longer need to handle it.

To match sha512_blocks(), change the type of the nblocks parameter of
the assembly functions from int to size_t.  The assembly functions
actually already treated it as size_t.
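Schematically, the parameter-type change looks as follows; the "before"
declaration is reconstructed from the commit text and is illustrative only,
while the "after" matches the header below:

	/* Before (reconstructed, illustrative): nblocks was declared as int. */
	asmlinkage void sha512_block_data_order(struct sha512_block_state *state,
						const u8 *data, int nblocks);

	/* After: nblocks is size_t, matching the sha512_blocks() library hook. */
	asmlinkage void sha512_block_data_order(struct sha512_block_state *state,
						const u8 *data, size_t nblocks);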

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250630160320.2888-8-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
2025-06-30 09:26:19 -07:00


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * arm32-optimized SHA-512 block function
 *
 * Copyright 2025 Google LLC
 */

#include <asm/neon.h>
#include <crypto/internal/simd.h>

static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);

asmlinkage void sha512_block_data_order(struct sha512_block_state *state,
					const u8 *data, size_t nblocks);
asmlinkage void sha512_block_data_order_neon(struct sha512_block_state *state,
					     const u8 *data, size_t nblocks);

static void sha512_blocks(struct sha512_block_state *state,
			  const u8 *data, size_t nblocks)
{
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
	    static_branch_likely(&have_neon) && likely(crypto_simd_usable())) {
		kernel_neon_begin();
		sha512_block_data_order_neon(state, data, nblocks);
		kernel_neon_end();
	} else {
		sha512_block_data_order(state, data, nblocks);
	}
}

#ifdef CONFIG_KERNEL_MODE_NEON
#define sha512_mod_init_arch sha512_mod_init_arch
static inline void sha512_mod_init_arch(void)
{
	if (cpu_has_neon())
		static_branch_enable(&have_neon);
}
#endif /* CONFIG_KERNEL_MODE_NEON */
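
For context, a hedged sketch of the consumer side of the
sha512_mod_init_arch() hook: because the header #defines the name to itself,
generic library code can detect the optional arch hook with #ifdef and call
it once at init to flip the have_neon static key.  The generic init function
shown here is an assumption about that pattern, not part of this file:

	/*
	 * Hedged sketch (not from this patch): generic SHA-512 library
	 * init calling the optional arch hook if the header defined it.
	 */
	static int __init sha512_mod_init(void)
	{
	#ifdef sha512_mod_init_arch
		sha512_mod_init_arch();	/* e.g. enables the NEON static key on arm */
	#endif
		return 0;
	}
	module_init(sha512_mod_init);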