linux/lib/crypto/arm64/sha512.h
Eric Biggers 9f97707bdb lib/crypto: sha256: Remove sha256_blocks_simd()
Instead of having both sha256_blocks_arch() and sha256_blocks_simd(),
have just sha256_blocks_arch(), which uses the most efficient
implementation available in the calling context.

This is simpler, as it reduces the API surface.  It's also safer, since
sha256_blocks_arch() just works in all contexts, including contexts
where the FPU/SIMD/vector registers cannot be used.  This doesn't mean
that SHA-256 computations *should* be done in such contexts, but rather
that we should just do the right thing instead of corrupting a random task's
registers.  Eliminating this footgun and simplifying the code is well
worth the very small performance cost of doing the check.
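
As a rough sketch of the dispatch this describes (illustrative only: the
state type, the sha256_ce_transform() helper, and the have_ce static key
are assumptions modeled on the sha512 code further down; the scalar
fallback name comes from the note below):

	/* Sketch only -- all names here are illustrative. */
	static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_ce);

	asmlinkage void sha256_ce_transform(struct sha256_block_state *state,
					    const u8 *data, size_t nblocks);
	asmlinkage void sha256_block_data_order(struct sha256_block_state *state,
						const u8 *data, size_t nblocks);

	void sha256_blocks_arch(struct sha256_block_state *state,
				const u8 *data, size_t nblocks)
	{
		/* Take the SIMD path only when kernel-mode NEON may be used here. */
		if (static_branch_likely(&have_ce) && crypto_simd_usable()) {
			kernel_neon_begin();
			sha256_ce_transform(state, data, nblocks);
			kernel_neon_end();
		} else {
			/* Always-safe scalar fallback. */
			sha256_block_data_order(state, data, nblocks);
		}
	}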

Note: in the case of arm and arm64, what used to be sha256_blocks_arch()
is renamed back to its original name of sha256_block_data_order().
sha256_blocks_arch() is now used for the higher-level dispatch function.
This renaming also required an update to lib/crypto/arm64/sha512.h,
since sha2-armv8.pl is shared by both SHA-256 and SHA-512.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250630160645.3198-5-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
2025-07-04 10:18:53 -07:00

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * arm64-optimized SHA-512 block function
 *
 * Copyright 2025 Google LLC
 */
#include <asm/neon.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>

/* Set when the CPU implements the ARMv8 SHA-512 instructions. */
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha512_insns);

/* Scalar assembly implementation; usable in any context. */
asmlinkage void sha512_block_data_order(struct sha512_block_state *state,
					const u8 *data, size_t nblocks);

/* SHA-512 Crypto Extensions implementation; returns blocks left unprocessed. */
asmlinkage size_t __sha512_ce_transform(struct sha512_block_state *state,
					const u8 *data, size_t nblocks);

static void sha512_blocks(struct sha512_block_state *state,
			  const u8 *data, size_t nblocks)
{
	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
	    static_branch_likely(&have_sha512_insns) &&
	    likely(crypto_simd_usable())) {
		do {
			size_t rem;

			kernel_neon_begin();
			rem = __sha512_ce_transform(state, data, nblocks);
			kernel_neon_end();
			/* Advance past the blocks consumed in this NEON section. */
			data += (nblocks - rem) * SHA512_BLOCK_SIZE;
			nblocks = rem;
		} while (nblocks);
	} else {
		/* No usable SIMD here: fall back to the scalar implementation. */
		sha512_block_data_order(state, data, nblocks);
	}
}

#ifdef CONFIG_KERNEL_MODE_NEON
#define sha512_mod_init_arch sha512_mod_init_arch
static inline void sha512_mod_init_arch(void)
{
	/* Enable the fast path once the CPU feature check has passed. */
	if (cpu_have_named_feature(SHA512))
		static_branch_enable(&have_sha512_insns);
}
#endif /* CONFIG_KERNEL_MODE_NEON */
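
For context, a minimal, hypothetical sketch of how a caller might drive
sha512_blocks() over a buffer; the helper name and return convention are
assumptions for illustration, not the actual generic lib/crypto code:

	/* Hypothetical example: hash all complete 128-byte blocks in a buffer. */
	static size_t sha512_consume_blocks(struct sha512_block_state *state,
					    const u8 *data, size_t len)
	{
		size_t nblocks = len / SHA512_BLOCK_SIZE;

		if (nblocks)
			sha512_blocks(state, data, nblocks);

		/* Bytes consumed; the caller buffers any partial trailing block. */
		return nblocks * SHA512_BLOCK_SIZE;
	}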