linux/lib/crypto/x86/sha1.h
Eric Biggers f3d6cb3dc0 lib/crypto: x86/sha1: Migrate optimized code into library
Instead of exposing the x86-optimized SHA-1 code via x86-specific
crypto_shash algorithms, just implement the sha1_blocks() library
function.  This is much simpler, it makes the SHA-1 library functions
x86-optimized, and it fixes the longstanding issue where the
x86-optimized SHA-1 code was disabled by default.  SHA-1 remains
available through crypto_shash, but individual architectures no longer
need to handle it.
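
For context, a minimal caller sketch (sha1_demo() is hypothetical; it
assumes the one-shot sha1() helper and SHA1_DIGEST_SIZE from
include/crypto/sha1.h, added earlier in this series):

	#include <crypto/sha1.h>

	static void sha1_demo(void)
	{
		static const u8 msg[] = "abc";
		u8 digest[SHA1_DIGEST_SIZE];

		/* Internally splits the input into 64-byte blocks and
		 * feeds them to the arch-provided sha1_blocks(). */
		sha1(msg, sizeof(msg) - 1, digest);
	}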

To match sha1_blocks(), change the type of the nblocks parameter of the
assembly functions from int to size_t.  The assembly functions actually
already treated it as size_t.
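
A sketch of the resulting change, using the SSSE3 transform as an
example (illustrative only; the exact previous declaration may have
differed):

	/* Before (sketch): nblocks was declared as int */
	asmlinkage void sha1_transform_ssse3(struct sha1_block_state *state,
					     const u8 *data, int nblocks);

	/* After: size_t, matching the sha1_blocks() library signature */
	asmlinkage void sha1_transform_ssse3(struct sha1_block_state *state,
					     const u8 *data, size_t nblocks);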

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250712232329.818226-14-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
2025-07-14 11:28:35 -07:00

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SHA-1 optimized for x86_64
 *
 * Copyright 2025 Google LLC
 */
#include <asm/fpu/api.h>
#include <linux/static_call.h>
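
/*
 * sha1_blocks() dispatches through this static call, which starts out
 * pointing at the portable sha1_blocks_generic() and is retargeted once
 * at boot by sha1_mod_init_arch() based on the CPU's features.
 */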
DEFINE_STATIC_CALL(sha1_blocks_x86, sha1_blocks_generic);

#define DEFINE_X86_SHA1_FN(c_fn, asm_fn)                                \
        asmlinkage void asm_fn(struct sha1_block_state *state,          \
                               const u8 *data, size_t nblocks);         \
        static void c_fn(struct sha1_block_state *state,                \
                         const u8 *data, size_t nblocks)                \
        {                                                               \
                if (likely(irq_fpu_usable())) {                         \
                        kernel_fpu_begin();                             \
                        asm_fn(state, data, nblocks);                   \
                        kernel_fpu_end();                               \
                } else {                                                \
                        sha1_blocks_generic(state, data, nblocks);      \
                }                                                       \
        }
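
/*
 * Wrappers for the SSSE3, AVX, and SHA-NI assembly transforms.  Each one
 * runs the assembly under kernel_fpu_begin()/kernel_fpu_end() when the
 * FPU is usable in the current context, and otherwise falls back to the
 * generic C implementation.
 */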
DEFINE_X86_SHA1_FN(sha1_blocks_ssse3, sha1_transform_ssse3);
DEFINE_X86_SHA1_FN(sha1_blocks_avx, sha1_transform_avx);
DEFINE_X86_SHA1_FN(sha1_blocks_ni, sha1_ni_transform);
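
/*
 * The AVX2 transform pays off only for sufficiently many blocks, so
 * sha1_blocks_avx2() below uses the plain AVX transform for inputs
 * shorter than SHA1_AVX2_BLOCK_OPTSIZE blocks.
 */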
#define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */

asmlinkage void sha1_transform_avx2(struct sha1_block_state *state,
                                    const u8 *data, size_t nblocks);
static void sha1_blocks_avx2(struct sha1_block_state *state,
                             const u8 *data, size_t nblocks)
{
        if (likely(irq_fpu_usable())) {
                kernel_fpu_begin();
                /* Select the optimal transform based on the number of blocks */
                if (nblocks >= SHA1_AVX2_BLOCK_OPTSIZE)
                        sha1_transform_avx2(state, data, nblocks);
                else
                        sha1_transform_avx(state, data, nblocks);
                kernel_fpu_end();
        } else {
                sha1_blocks_generic(state, data, nblocks);
        }
}

static void sha1_blocks(struct sha1_block_state *state,
                        const u8 *data, size_t nblocks)
{
        static_call(sha1_blocks_x86)(state, data, nblocks);
}
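
/*
 * Defining sha1_mod_init_arch to itself signals to the generic SHA-1
 * library code (which tests for the macro) that this architecture
 * supplies a boot-time init hook.
 */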
#define sha1_mod_init_arch sha1_mod_init_arch
static inline void sha1_mod_init_arch(void)
{
        if (boot_cpu_has(X86_FEATURE_SHA_NI)) {
                static_call_update(sha1_blocks_x86, sha1_blocks_ni);
        } else if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
                                     NULL) &&
                   boot_cpu_has(X86_FEATURE_AVX)) {
                if (boot_cpu_has(X86_FEATURE_AVX2) &&
                    boot_cpu_has(X86_FEATURE_BMI1) &&
                    boot_cpu_has(X86_FEATURE_BMI2))
                        static_call_update(sha1_blocks_x86, sha1_blocks_avx2);
                else
                        static_call_update(sha1_blocks_x86, sha1_blocks_avx);
        } else if (boot_cpu_has(X86_FEATURE_SSSE3)) {
                static_call_update(sha1_blocks_x86, sha1_blocks_ssse3);
        }
}