Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)

Merge tag 'libcrypto-updates-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux

Pull crypto library updates from Eric Biggers:
 "This is the main crypto library pull request for 6.17. The main focus
  this cycle is on reorganizing the SHA-1 and SHA-2 code, providing
  high-quality library APIs for SHA-1 and SHA-2 including HMAC support,
  and establishing conventions for lib/crypto/ going forward:

   - Migrate the SHA-1 and SHA-512 code (and also SHA-384 which shares
     most of the SHA-512 code) into lib/crypto/. This includes both the
     generic and architecture-optimized code. Greatly simplify how the
     architecture-optimized code is integrated. Add an easy-to-use
     library API for each SHA variant, including HMAC support. Finally,
     reimplement the crypto_shash support on top of the library API.

   - Apply the same reorganization to the SHA-256 code (and also SHA-224
     which shares most of the SHA-256 code). This is a somewhat smaller
     change, due to my earlier work on SHA-256. But this brings in all
     the same additional improvements that I made for SHA-1 and SHA-512.

  There are also some smaller changes:

   - Move the architecture-optimized ChaCha, Poly1305, and BLAKE2s code
     from arch/$(SRCARCH)/lib/crypto/ to lib/crypto/$(SRCARCH)/. For
     these algorithms it's just a move, not a full reorganization yet.

   - Fix the MIPS chacha-core.S to build with the clang assembler.

   - Fix the Poly1305 functions to work in all contexts.

   - Fix a performance regression in the x86_64 Poly1305 code.

   - Clean up the x86_64 SHA-NI optimized SHA-1 assembly code.

  Note that since the new organization of the SHA code is much simpler,
  the diffstat of this pull request is negative, despite the addition of
  new fully-documented library APIs for multiple SHA and HMAC-SHA
  variants. These APIs will allow further simplifications across the
  kernel as users start using them instead of the old-school crypto API.
  (I've already written a lot of such conversion patches, removing over
  1000 more lines of code. But most of those will target 6.18 or later)"

* tag 'libcrypto-updates-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux: (67 commits)
  lib/crypto: arm64/sha512-ce: Drop compatibility macros for older binutils
  lib/crypto: x86/sha1-ni: Convert to use rounds macros
  lib/crypto: x86/sha1-ni: Minor optimizations and cleanup
  crypto: sha1 - Remove sha1_base.h
  lib/crypto: x86/sha1: Migrate optimized code into library
  lib/crypto: sparc/sha1: Migrate optimized code into library
  lib/crypto: s390/sha1: Migrate optimized code into library
  lib/crypto: powerpc/sha1: Migrate optimized code into library
  lib/crypto: mips/sha1: Migrate optimized code into library
  lib/crypto: arm64/sha1: Migrate optimized code into library
  lib/crypto: arm/sha1: Migrate optimized code into library
  crypto: sha1 - Use same state format as legacy drivers
  crypto: sha1 - Wrap library and add HMAC support
  lib/crypto: sha1: Add HMAC support
  lib/crypto: sha1: Add SHA-1 library functions
  lib/crypto: sha1: Rename sha1_init() to sha1_init_raw()
  crypto: x86/sha1 - Rename conflicting symbol
  lib/crypto: sha2: Add hmac_sha*_init_usingrawkey()
  lib/crypto: arm/poly1305: Remove unneeded empty weak function
  lib/crypto: x86/poly1305: Fix performance regression on short messages
  ...
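As a rough illustration of the library-style hashing interface the pull request describes, here is a minimal sketch of a caller. The one-shot sha256() helper from <crypto/sha2.h> predates this series; the HMAC names (hmac_sha256_init_usingrawkey(), hmac_sha256_update(), hmac_sha256_final()) and the struct hmac_sha256_ctx type are inferred from the commit titles listed above and may not match the merged signatures exactly.

/*
 * Sketch only: the HMAC symbols below are assumed from the commit titles
 * ("lib/crypto: sha2: Add hmac_sha*_init_usingrawkey()", "lib/crypto: sha1:
 * Add HMAC support") and may differ from the actual merged API.
 */
#include <crypto/sha2.h>

static void demo_sha256_library(const u8 *msg, size_t msg_len,
				const u8 *key, size_t key_len)
{
	u8 digest[SHA256_DIGEST_SIZE];
	u8 mac[SHA256_DIGEST_SIZE];
	struct hmac_sha256_ctx ctx;	/* assumed context type */

	/* One-shot hash: this helper existed before this series. */
	sha256(msg, msg_len, digest);

	/* Incremental HMAC using a raw key (names assumed, see above). */
	hmac_sha256_init_usingrawkey(&ctx, key, key_len);
	hmac_sha256_update(&ctx, msg, msg_len);
	hmac_sha256_final(&ctx, mac);
}

A direct call like this needs no crypto_shash transform allocation or indirection, which is the kind of simplification over the old-school crypto API that the pull request refers to.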
arch/x86/lib/Makefile (64 lines, 2 KiB)
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for x86 specific library files.
#

# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n

# KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
KCSAN_SANITIZE_delay.o := n
ifdef CONFIG_KCSAN
# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
endif

inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@
      cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@

$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
	$(call cmd,inat_tables)

$(obj)/inat.o: $(obj)/inat-tables.c

clean-files := inat-tables.c

obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o

lib-y := delay.o misc.o cmdline.o cpu.o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
lib-y += pc-conf-reg.o
lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_MITIGATION_RETPOLINE) += retpoline.o

obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
obj-y += iomem.o

ifeq ($(CONFIG_X86_32),y)
        obj-y += atomic64_32.o
        lib-y += atomic64_cx8_32.o
        lib-y += checksum_32.o
        lib-y += strstr_32.o
        lib-y += string_32.o
        lib-y += memmove_32.o
        lib-y += cmpxchg8b_emu.o
ifneq ($(CONFIG_X86_CX8),y)
        lib-y += atomic64_386_32.o
endif
else
ifneq ($(CONFIG_GENERIC_CSUM),y)
        lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
endif
        lib-y += clear_page_64.o copy_page_64.o
        lib-y += memmove_64.o memset_64.o
        lib-y += copy_user_64.o copy_user_uncached_64.o
        lib-y += cmpxchg16b_emu.o
        lib-y += bhi.o
endif
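For context on the instruction-decoder objects in this Makefile: insn.o and inat.o, together with the generated inat-tables.c (built from x86-opcode-map.txt by gen-insn-attr-x86.awk), implement the x86 instruction decoder selected by CONFIG_INSTRUCTION_DECODER. Below is a minimal sketch of how other kernel code typically consumes it; insn_decode(), INSN_MODE_KERN, and MAX_INSN_SIZE are existing definitions from <asm/insn.h>, while the wrapper function itself is hypothetical.

#include <asm/insn.h>

/*
 * Hypothetical helper: decode one instruction at a kernel address and
 * return its length, using the decoder built from insn.o/inat.o above.
 */
static int demo_insn_length(const void *kaddr)
{
	struct insn insn;
	int ret;

	ret = insn_decode(&insn, kaddr, MAX_INSN_SIZE, INSN_MODE_KERN);
	if (ret < 0)
		return ret;

	return insn.length;
}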