Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2025-09-18 22:14:16 +00:00.
Move the contents of arch/s390/lib/crypto/ into lib/crypto/s390/. The new code organization makes a lot more sense for how this code actually works and is developed. In particular, it makes it possible to build each algorithm as a single module, with better inlining and dead code elimination. For a more detailed explanation, see the patchset which did this for the CRC library code: https://lore.kernel.org/r/20250607200454.73587-1-ebiggers@kernel.org/. Also see the patchset which did this for SHA-512: https://lore.kernel.org/linux-crypto/20250616014019.415791-1-ebiggers@kernel.org/ This is just a preparatory commit, which does the move to get the files into their new location but keeps them building the same way as before. Later commits will make the actual improvements to the way the arch-optimized code is integrated for each algorithm. Acked-by: Ard Biesheuvel <ardb@kernel.org> Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com> Reviewed-by: Sohil Mehta <sohil.mehta@intel.com> Link: https://lore.kernel.org/r/20250619191908.134235-7-ebiggers@kernel.org Signed-off-by: Eric Biggers <ebiggers@kernel.org>
56 lines · 1.5 KiB · C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* ChaCha stream cipher (s390 optimized)
|
|
*
|
|
* Copyright IBM Corp. 2021
|
|
*/
|
|
|
|
#define KMSG_COMPONENT "chacha_s390"
|
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
|
|
|
#include <crypto/chacha.h>
|
|
#include <linux/cpufeature.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/sizes.h>
|
|
#include <asm/fpu.h>
|
|
#include "chacha-s390.h"
|
|
|
|
void hchacha_block_arch(const struct chacha_state *state,
|
|
u32 out[HCHACHA_OUT_WORDS], int nrounds)
|
|
{
|
|
/* TODO: implement hchacha_block_arch() in assembly */
|
|
hchacha_block_generic(state, out, nrounds);
|
|
}
|
|
EXPORT_SYMBOL(hchacha_block_arch);
|
|
|
|
/*
 * Encrypt/decrypt @bytes from @src into @dst using the ChaCha state.
 *
 * The vectorized s390 implementation hard-codes 20 rounds and cannot
 * handle one block of data or less, but otherwise handles input of
 * arbitrary size; anything it cannot take falls back to the generic
 * C implementation.
 */
void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
		       unsigned int bytes, int nrounds)
{
	if (bytes > CHACHA_BLOCK_SIZE && nrounds == 20 && cpu_has_vx()) {
		DECLARE_KERNEL_FPU_ONSTACK32(vxstate);

		/* The vector registers are only usable inside a
		 * kernel_fpu_begin()/kernel_fpu_end() section.
		 */
		kernel_fpu_begin(&vxstate, KERNEL_VXR);
		chacha20_vx(dst, src, bytes, &state->x[4], &state->x[12]);
		kernel_fpu_end(&vxstate, KERNEL_VXR);

		/* Advance the block counter by the number of full or
		 * partial blocks consumed.
		 */
		state->x[12] += round_up(bytes, CHACHA_BLOCK_SIZE) /
				CHACHA_BLOCK_SIZE;
	} else {
		chacha_crypt_generic(state, dst, src, bytes, nrounds);
	}
}
EXPORT_SYMBOL(chacha_crypt_arch);
|
|
|
|
/*
 * Report whether the arch-optimized ChaCha path can run on this CPU,
 * i.e. whether the vector extension facility is available.
 */
bool chacha_is_arch_optimized(void)
{
	return cpu_has_vx();
}
EXPORT_SYMBOL(chacha_is_arch_optimized);
|
|
|
|
/* Module metadata for modinfo / license enforcement. */
MODULE_DESCRIPTION("ChaCha stream cipher (s390 optimized)");
MODULE_LICENSE("GPL v2");
|