Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)

Instead of exposing the arm-optimized SHA-1 code via arm-specific crypto_shash algorithms, just implement the sha1_blocks() library function. This is much simpler, it makes the SHA-1 library functions arm-optimized, and it fixes the longstanding issue where the arm-optimized SHA-1 code was disabled by default. SHA-1 still remains available through crypto_shash, but individual architectures no longer need to handle it.

To match sha1_blocks(), change the type of the nblocks parameter of the assembly functions from int to size_t. The assembly functions already treated it as size_t.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250712232329.818226-8-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
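To make the interface change concrete, here is a minimal C sketch of the prototypes the message describes. The function names and the digest/state argument layout are assumptions for illustration only; the one detail taken from the message above is that the block count moves from int to size_t.

/*
 * Sketch only: hypothetical prototypes illustrating the change described
 * in the commit message.  Names and the digest/state argument are assumed;
 * the point is that the block count is now passed as size_t, not int.
 */
#include <stddef.h>
#include <stdint.h>

/* Before (assumed): the arm assembly routine took the block count as int. */
/* void sha1_block_data_order(uint32_t digest[5], const uint8_t *data, int nblocks); */

/* After (assumed): nblocks is size_t, matching how the assembly already used it. */
void sha1_block_data_order(uint32_t digest[5], const uint8_t *data, size_t nblocks);

/*
 * Library function the architecture now implements instead of registering
 * arm-specific crypto_shash algorithms; the argument layout here is an
 * assumption made for this sketch.
 */
void sha1_blocks(uint32_t state[5], const uint8_t *data, size_t nblocks);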
130 lines, 4.3 KiB, Text
# SPDX-License-Identifier: GPL-2.0

menu "Accelerated Cryptographic Algorithms for CPU (arm)"

config CRYPTO_CURVE25519_NEON
	tristate
	depends on KERNEL_MODE_NEON
	select CRYPTO_KPP
	select CRYPTO_LIB_CURVE25519_GENERIC
	select CRYPTO_ARCH_HAVE_LIB_CURVE25519
	default CRYPTO_LIB_CURVE25519_INTERNAL
	help
	  Curve25519 algorithm

	  Architecture: arm with
	  - NEON (Advanced SIMD) extensions

config CRYPTO_GHASH_ARM_CE
	tristate "Hash functions: GHASH (PMULL/NEON/ARMv8 Crypto Extensions)"
	depends on KERNEL_MODE_NEON
	select CRYPTO_AEAD
	select CRYPTO_HASH
	select CRYPTO_CRYPTD
	select CRYPTO_LIB_AES
	select CRYPTO_LIB_GF128MUL
	help
	  GCM GHASH function (NIST SP800-38D)

	  Architecture: arm using
	  - PMULL (Polynomial Multiply Long) instructions
	  - NEON (Advanced SIMD) extensions
	  - ARMv8 Crypto Extensions

	  Use an implementation of GHASH (used by the GCM AEAD chaining mode)
	  that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
	  that is part of the ARMv8 Crypto Extensions, or a slower variant that
	  uses the vmull.p8 instruction that is part of the basic NEON ISA.

config CRYPTO_NHPOLY1305_NEON
	tristate "Hash functions: NHPoly1305 (NEON)"
	depends on KERNEL_MODE_NEON
	select CRYPTO_NHPOLY1305
	help
	  NHPoly1305 hash function (Adiantum)

	  Architecture: arm using:
	  - NEON (Advanced SIMD) extensions

config CRYPTO_BLAKE2B_NEON
	tristate "Hash functions: BLAKE2b (NEON)"
	depends on KERNEL_MODE_NEON
	select CRYPTO_BLAKE2B
	help
	  BLAKE2b cryptographic hash function (RFC 7693)

	  Architecture: arm using
	  - NEON (Advanced SIMD) extensions

	  BLAKE2b digest algorithm optimized with ARM NEON instructions.
	  On ARM processors that have NEON support but not the ARMv8
	  Crypto Extensions, typically this BLAKE2b implementation is
	  much faster than the SHA-2 family and slightly faster than
	  SHA-1.

config CRYPTO_AES_ARM
	tristate "Ciphers: AES"
	select CRYPTO_ALGAPI
	select CRYPTO_AES
	help
	  Block ciphers: AES cipher algorithms (FIPS-197)

	  Architecture: arm

	  On ARM processors without the Crypto Extensions, this is the
	  fastest AES implementation for single blocks. For multiple
	  blocks, the NEON bit-sliced implementation is usually faster.

	  This implementation may be vulnerable to cache timing attacks,
	  since it uses lookup tables. However, as countermeasures it
	  disables IRQs and preloads the tables; it is hoped this makes
	  such attacks very difficult.

config CRYPTO_AES_ARM_BS
	tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
	depends on KERNEL_MODE_NEON
	select CRYPTO_AES_ARM
	select CRYPTO_SKCIPHER
	select CRYPTO_LIB_AES
	help
	  Length-preserving ciphers: AES cipher algorithms (FIPS-197)
	  with block cipher modes:
	  - ECB (Electronic Codebook) mode (NIST SP800-38A)
	  - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
	  - CTR (Counter) mode (NIST SP800-38A)
	  - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
	    and IEEE 1619)

	  Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
	  and for XTS mode encryption, CBC and XTS mode decryption speedup is
	  around 25%. (CBC encryption speed is not affected by this driver.)

	  The bit sliced AES code does not use lookup tables, so it is believed
	  to be invulnerable to cache timing attacks. However, since the bit
	  sliced AES code cannot process single blocks efficiently, in certain
	  cases table-based code with some countermeasures against cache timing
	  attacks will still be used as a fallback method; specifically CBC
	  encryption (not CBC decryption), the encryption of XTS tweaks, XTS
	  ciphertext stealing when the message isn't a multiple of 16 bytes, and
	  CTR when invoked in a context in which NEON instructions are unusable.

config CRYPTO_AES_ARM_CE
	tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)"
	depends on KERNEL_MODE_NEON
	select CRYPTO_SKCIPHER
	select CRYPTO_LIB_AES
	help
	  Length-preserving ciphers: AES cipher algorithms (FIPS-197)
	  with block cipher modes:
	  - ECB (Electronic Codebook) mode (NIST SP800-38A)
	  - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
	  - CTR (Counter) mode (NIST SP800-38A)
	  - CTS (Cipher Text Stealing) mode (NIST SP800-38A)
	  - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
	    and IEEE 1619)

	  Architecture: arm using:
	  - ARMv8 Crypto Extensions

endmenu