// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions.
 *
 * Copyright (C) 2015 - 2018 Linaro Ltd.
 * Copyright (C) 2023 Google LLC.
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/unaligned.h>

MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("ghash");
MODULE_ALIAS_CRYPTO("gcm(aes)");
MODULE_ALIAS_CRYPTO("rfc4106(gcm(aes))");

#define RFC4106_NONCE_SIZE	4

struct ghash_key {
	be128 k;
	u64 h[][2];
};

struct gcm_key {
	u64 h[4][2];
	u32 rk[AES_MAX_KEYLENGTH_U32];
	int rounds;
	u8 nonce[];	// for RFC4106 nonce
};

struct arm_ghash_desc_ctx {
	u64 digest[GHASH_DIGEST_SIZE / sizeof(u64)];
};

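/*
 * Core GHASH block-processing routines, implemented in assembly. The p64
 * variant uses the 64x64->128 polynomial multiply (vmull.p64) provided by
 * the Crypto Extensions; the p8 variant is a fallback built on vmull.p8.
 * The use_p64 static key is enabled at module init when HWCAP2_PMULL is
 * advertised.
 */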
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
				       u64 const h[][2], const char *head);

asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
				      u64 const h[][2], const char *head);

static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64);

static int ghash_init(struct shash_desc *desc)
{
	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);

	*ctx = (struct arm_ghash_desc_ctx){};
	return 0;
}

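/*
 * Process a run of full GHASH blocks under kernel_neon_begin()/end(),
 * dispatching to the vmull.p64 or vmull.p8 implementation via the
 * use_p64 static key.
 */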
static void ghash_do_update(int blocks, u64 dg[], const char *src,
			    struct ghash_key *key, const char *head)
{
	kernel_neon_begin();
	if (static_branch_likely(&use_p64))
		pmull_ghash_update_p64(blocks, dg, src, key->h, head);
	else
		pmull_ghash_update_p8(blocks, dg, src, key->h, head);
	kernel_neon_end();
}

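/*
 * Only whole blocks are consumed here; the shash core buffers partial
 * input for CRYPTO_AHASH_ALG_BLOCK_ONLY algorithms, so return the number
 * of trailing bytes that were left unprocessed.
 */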
static int ghash_update(struct shash_desc *desc, const u8 *src,
			unsigned int len)
{
	struct ghash_key *key = crypto_shash_ctx(desc->tfm);
	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
	int blocks;

	blocks = len / GHASH_BLOCK_SIZE;
	ghash_do_update(blocks, ctx->digest, src, key, NULL);
	return len - blocks * GHASH_BLOCK_SIZE;
}

static int ghash_export(struct shash_desc *desc, void *out)
{
	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
	u8 *dst = out;

	put_unaligned_be64(ctx->digest[1], dst);
	put_unaligned_be64(ctx->digest[0], dst + 8);
	return 0;
}

static int ghash_import(struct shash_desc *desc, const void *in)
{
	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);
	const u8 *src = in;

	ctx->digest[1] = get_unaligned_be64(src);
	ctx->digest[0] = get_unaligned_be64(src + 8);
	return 0;
}

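/* Zero-pad and absorb any trailing partial block, then write out the digest. */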
static int ghash_finup(struct shash_desc *desc, const u8 *src,
		       unsigned int len, u8 *dst)
{
	struct ghash_key *key = crypto_shash_ctx(desc->tfm);
	struct arm_ghash_desc_ctx *ctx = shash_desc_ctx(desc);

	if (len) {
		u8 buf[GHASH_BLOCK_SIZE] = {};

		memcpy(buf, src, len);
		ghash_do_update(1, ctx->digest, buf, key, NULL);
		memzero_explicit(buf, sizeof(buf));
	}
	return ghash_export(desc, dst);
}

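/*
 * Convert a GHASH key into the pre-shifted form used by the NEON code:
 * shift the 128-bit value left by one bit (the carry out of the high word
 * moves into the low word) and fold the GHASH reduction constant back in
 * when the shift overflows.
 */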
static void ghash_reflect(u64 h[], const be128 *k)
{
	u64 carry = be64_to_cpu(k->a) >> 63;

	h[0] = (be64_to_cpu(k->b) << 1) | carry;
	h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);

	if (carry)
		h[1] ^= 0xc200000000000000UL;
}

static int ghash_setkey(struct crypto_shash *tfm,
			const u8 *inkey, unsigned int keylen)
{
	struct ghash_key *key = crypto_shash_ctx(tfm);

	if (keylen != GHASH_BLOCK_SIZE)
		return -EINVAL;

	/* needed for the fallback */
	memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
	ghash_reflect(key->h[0], &key->k);

	if (static_branch_likely(&use_p64)) {
		be128 h = key->k;

		gf128mul_lle(&h, &key->k);
		ghash_reflect(key->h[1], &h);

		gf128mul_lle(&h, &key->k);
		ghash_reflect(key->h[2], &h);

		gf128mul_lle(&h, &key->k);
		ghash_reflect(key->h[3], &h);
	}
	return 0;
}

static struct shash_alg ghash_alg = {
	.digestsize = GHASH_DIGEST_SIZE,
	.init = ghash_init,
	.update = ghash_update,
	.finup = ghash_finup,
	.setkey = ghash_setkey,
	.export = ghash_export,
	.import = ghash_import,
	.descsize = sizeof(struct arm_ghash_desc_ctx),
	.statesize = sizeof(struct ghash_desc_ctx),

	.base.cra_name = "ghash",
	.base.cra_driver_name = "ghash-ce",
	.base.cra_priority = 300,
	.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY,
	.base.cra_blocksize = GHASH_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
	.base.cra_module = THIS_MODULE,
};

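/*
 * AES-GCM bulk routines, implemented in assembly. The corresponding AEAD
 * algorithms are only registered when HWCAP2_PMULL is set.
 */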
void pmull_gcm_encrypt(int blocks, u64 dg[], const char *src,
		       struct gcm_key const *k, char *dst,
		       const char *iv, int rounds, u32 counter);

void pmull_gcm_enc_final(int blocks, u64 dg[], char *tag,
			 struct gcm_key const *k, char *head,
			 const char *iv, int rounds, u32 counter);

void pmull_gcm_decrypt(int bytes, u64 dg[], const char *src,
		       struct gcm_key const *k, char *dst,
		       const char *iv, int rounds, u32 counter);

int pmull_gcm_dec_final(int bytes, u64 dg[], char *tag,
			struct gcm_key const *k, char *head,
			const char *iv, int rounds, u32 counter,
			const char *otag, int authsize);

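/*
 * Expand the AES key, derive the GHASH key H by encrypting an all-zero
 * block, and precompute the first four powers of H in reflected form
 * for the assembly routines.
 */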
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *inkey,
			  unsigned int keylen)
{
	struct gcm_key *ctx = crypto_aead_ctx(tfm);
	struct crypto_aes_ctx aes_ctx;
	be128 h, k;
	int ret;

	ret = aes_expandkey(&aes_ctx, inkey, keylen);
	if (ret)
		return -EINVAL;

	aes_encrypt(&aes_ctx, (u8 *)&k, (u8[AES_BLOCK_SIZE]){});

	memcpy(ctx->rk, aes_ctx.key_enc, sizeof(ctx->rk));
	ctx->rounds = 6 + keylen / 4;

	memzero_explicit(&aes_ctx, sizeof(aes_ctx));

	ghash_reflect(ctx->h[0], &k);

	h = k;
	gf128mul_lle(&h, &k);
	ghash_reflect(ctx->h[1], &h);

	gf128mul_lle(&h, &k);
	ghash_reflect(ctx->h[2], &h);

	gf128mul_lle(&h, &k);
	ghash_reflect(ctx->h[3], &h);

	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}

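/*
 * Accumulate associated data into the GHASH state, buffering any partial
 * block in buf[] until a full GHASH_BLOCK_SIZE worth is available.
 */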
static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
			   int *buf_count, struct gcm_key *ctx)
{
	if (*buf_count > 0) {
		int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count);

		memcpy(&buf[*buf_count], src, buf_added);

		*buf_count += buf_added;
		src += buf_added;
		count -= buf_added;
	}

	if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
		int blocks = count / GHASH_BLOCK_SIZE;

		pmull_ghash_update_p64(blocks, dg, src, ctx->h,
				       *buf_count ? buf : NULL);

		src += blocks * GHASH_BLOCK_SIZE;
		count %= GHASH_BLOCK_SIZE;
		*buf_count = 0;
	}

	if (count > 0) {
		memcpy(buf, src, count);
		*buf_count = count;
	}
}

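/*
 * Hash the associated data by walking req->src. Called with the NEON unit
 * enabled; it is briefly released and reacquired roughly every 4 KiB of
 * input so that preemption is not held off for too long.
 */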
static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_key *ctx = crypto_aead_ctx(aead);
	u8 buf[GHASH_BLOCK_SIZE];
	struct scatter_walk walk;
	int buf_count = 0;

	scatterwalk_start(&walk, req->src);

	do {
		unsigned int n;

		n = scatterwalk_next(&walk, len);
		gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx);
		scatterwalk_done_src(&walk, n);

		if (unlikely(len / SZ_4K > (len - n) / SZ_4K)) {
			kernel_neon_end();
			kernel_neon_begin();
		}

		len -= n;
	} while (len);

	if (buf_count) {
		memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
		pmull_ghash_update_p64(1, dg, buf, ctx->h, NULL);
	}
}

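/*
 * Encrypt and authenticate: CTR counter blocks start at 2 (block 1 is
 * reserved for the tag), full blocks are handled by the assembly, and the
 * final partial block plus the length block are folded in by
 * pmull_gcm_enc_final(), which also produces the authentication tag.
 */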
static int gcm_encrypt(struct aead_request *req, const u8 *iv, u32 assoclen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_key *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 buf[AES_BLOCK_SIZE];
	u32 counter = 2;
	u64 dg[2] = {};
	be128 lengths;
	const u8 *src;
	u8 *tag, *dst;
	int tail, err;

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	kernel_neon_begin();

	if (assoclen)
		gcm_calculate_auth_mac(req, dg, assoclen);

	src = walk.src.virt.addr;
	dst = walk.dst.virt.addr;

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		int nblocks = walk.nbytes / AES_BLOCK_SIZE;

		pmull_gcm_encrypt(nblocks, dg, src, ctx, dst, iv,
				  ctx->rounds, counter);
		counter += nblocks;

		if (walk.nbytes == walk.total) {
			src += nblocks * AES_BLOCK_SIZE;
			dst += nblocks * AES_BLOCK_SIZE;
			break;
		}

		kernel_neon_end();

		err = skcipher_walk_done(&walk,
					 walk.nbytes % AES_BLOCK_SIZE);
		if (err)
			return err;

		src = walk.src.virt.addr;
		dst = walk.dst.virt.addr;

		kernel_neon_begin();
	}

	lengths.a = cpu_to_be64(assoclen * 8);
	lengths.b = cpu_to_be64(req->cryptlen * 8);

	tag = (u8 *)&lengths;
	tail = walk.nbytes % AES_BLOCK_SIZE;

	/*
	 * Bounce via a buffer unless we are encrypting in place and src/dst
	 * are not pointing to the start of the walk buffer. In that case, we
	 * can do a NEON load/xor/store sequence in place as long as we move
	 * the plain/ciphertext and keystream to the start of the register. If
	 * not, do a memcpy() to the end of the buffer so we can reuse the same
	 * logic.
	 */
	if (unlikely(tail && (tail == walk.nbytes || src != dst)))
		src = memcpy(buf + sizeof(buf) - tail, src, tail);

	pmull_gcm_enc_final(tail, dg, tag, ctx, (u8 *)src, iv,
			    ctx->rounds, counter);
	kernel_neon_end();

	if (unlikely(tail && src != dst))
		memcpy(dst, src, tail);

	if (walk.nbytes) {
		err = skcipher_walk_done(&walk, 0);
		if (err)
			return err;
	}

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}

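/*
 * Decrypt and verify: the expected tag is copied out of the source
 * scatterlist up front, and pmull_gcm_dec_final() compares it against the
 * computed tag, returning nonzero (mapped to -EBADMSG) on mismatch.
 */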
static int gcm_decrypt(struct aead_request *req, const u8 *iv, u32 assoclen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_key *ctx = crypto_aead_ctx(aead);
	int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 otag[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 counter = 2;
	u64 dg[2] = {};
	be128 lengths;
	const u8 *src;
	u8 *tag, *dst;
	int tail, err, ret;

	scatterwalk_map_and_copy(otag, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	err = skcipher_walk_aead_decrypt(&walk, req, false);

	kernel_neon_begin();

	if (assoclen)
		gcm_calculate_auth_mac(req, dg, assoclen);

	src = walk.src.virt.addr;
	dst = walk.dst.virt.addr;

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		int nblocks = walk.nbytes / AES_BLOCK_SIZE;

		pmull_gcm_decrypt(nblocks, dg, src, ctx, dst, iv,
				  ctx->rounds, counter);
		counter += nblocks;

		if (walk.nbytes == walk.total) {
			src += nblocks * AES_BLOCK_SIZE;
			dst += nblocks * AES_BLOCK_SIZE;
			break;
		}

		kernel_neon_end();

		err = skcipher_walk_done(&walk,
					 walk.nbytes % AES_BLOCK_SIZE);
		if (err)
			return err;

		src = walk.src.virt.addr;
		dst = walk.dst.virt.addr;

		kernel_neon_begin();
	}

	lengths.a = cpu_to_be64(assoclen * 8);
	lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8);

	tag = (u8 *)&lengths;
	tail = walk.nbytes % AES_BLOCK_SIZE;

	if (unlikely(tail && (tail == walk.nbytes || src != dst)))
		src = memcpy(buf + sizeof(buf) - tail, src, tail);

	ret = pmull_gcm_dec_final(tail, dg, tag, ctx, (u8 *)src, iv,
				  ctx->rounds, counter, otag, authsize);
	kernel_neon_end();

	if (unlikely(tail && src != dst))
		memcpy(dst, src, tail);

	if (walk.nbytes) {
		err = skcipher_walk_done(&walk, 0);
		if (err)
			return err;
	}

	return ret ? -EBADMSG : 0;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_encrypt(req, req->iv, req->assoclen);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_decrypt(req, req->iv, req->assoclen);
}

static int rfc4106_setkey(struct crypto_aead *tfm, const u8 *inkey,
			  unsigned int keylen)
{
	struct gcm_key *ctx = crypto_aead_ctx(tfm);
	int err;

	keylen -= RFC4106_NONCE_SIZE;
	err = gcm_aes_setkey(tfm, inkey, keylen);
	if (err)
		return err;

	memcpy(ctx->nonce, inkey + keylen, RFC4106_NONCE_SIZE);
	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	return crypto_rfc4106_check_authsize(authsize);
}

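/*
 * RFC4106: the 12-byte GCM IV is the 4-byte salt taken from the key
 * material, followed by the 8-byte per-request IV from the ESP header.
 */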
static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_key *ctx = crypto_aead_ctx(aead);
	u8 iv[GCM_AES_IV_SIZE];

	memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
	memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);

	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       gcm_encrypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE);
}

static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct gcm_key *ctx = crypto_aead_ctx(aead);
	u8 iv[GCM_AES_IV_SIZE];

	memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
	memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);

	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       gcm_decrypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE);
}

static struct aead_alg gcm_aes_algs[] = {{
	.ivsize = GCM_AES_IV_SIZE,
	.chunksize = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey = gcm_aes_setkey,
	.setauthsize = gcm_aes_setauthsize,
	.encrypt = gcm_aes_encrypt,
	.decrypt = gcm_aes_decrypt,

	.base.cra_name = "gcm(aes)",
	.base.cra_driver_name = "gcm-aes-ce",
	.base.cra_priority = 400,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct gcm_key),
	.base.cra_module = THIS_MODULE,
}, {
	.ivsize = GCM_RFC4106_IV_SIZE,
	.chunksize = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey = rfc4106_setkey,
	.setauthsize = rfc4106_setauthsize,
	.encrypt = rfc4106_encrypt,
	.decrypt = rfc4106_decrypt,

	.base.cra_name = "rfc4106(gcm(aes))",
	.base.cra_driver_name = "rfc4106-gcm-aes-ce",
	.base.cra_priority = 400,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct gcm_key) + RFC4106_NONCE_SIZE,
	.base.cra_module = THIS_MODULE,
}};

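/*
 * GHASH is usable on any NEON-capable CPU; the GCM AEADs and the faster
 * vmull.p64 code path additionally require the PMULL extension. When PMULL
 * is present, the GHASH context is grown to hold the extra powers of H.
 */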
static int __init ghash_ce_mod_init(void)
{
	int err;

	if (!(elf_hwcap & HWCAP_NEON))
		return -ENODEV;

	if (elf_hwcap2 & HWCAP2_PMULL) {
		err = crypto_register_aeads(gcm_aes_algs,
					    ARRAY_SIZE(gcm_aes_algs));
		if (err)
			return err;
		ghash_alg.base.cra_ctxsize += 3 * sizeof(u64[2]);
		static_branch_enable(&use_p64);
	}

	err = crypto_register_shash(&ghash_alg);
	if (err)
		goto err_aead;

	return 0;

err_aead:
	if (elf_hwcap2 & HWCAP2_PMULL)
		crypto_unregister_aeads(gcm_aes_algs,
					ARRAY_SIZE(gcm_aes_algs));
	return err;
}

static void __exit ghash_ce_mod_exit(void)
{
	crypto_unregister_shash(&ghash_alg);
	if (elf_hwcap2 & HWCAP2_PMULL)
		crypto_unregister_aeads(gcm_aes_algs,
					ARRAY_SIZE(gcm_aes_algs));
}

module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);