// SPDX-License-Identifier: GPL-2.0-only
/*
 * Bit sliced AES using NEON instructions
 *
 * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>

#include "aes-cipher.h"

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");

asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 ctr[]);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[], int);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[], int);
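
/*
 * The rk[] buffer below holds the key schedule in the bit-sliced layout
 * expected by the NEON routines; aesbs_convert_key() fills it in from a
 * regular AES key expansion.
 */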

struct aesbs_ctx {
	int	rounds;
	u8	rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
};

struct aesbs_cbc_ctx {
	struct aesbs_ctx	key;
	struct crypto_aes_ctx	fallback;
};

struct aesbs_xts_ctx {
	struct aesbs_ctx	key;
	struct crypto_aes_ctx	fallback;
	struct crypto_aes_ctx	tweak_key;
};
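
/*
 * Expand the key with the generic AES library code and convert the
 * encryption round keys into the bit-sliced format used by the NEON code.
 */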

static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			unsigned int key_len)
{
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = aes_expandkey(&rk, in_key, key_len);
	if (err)
		return err;

	ctx->rounds = 6 + key_len / 4;

	kernel_neon_begin();
	aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
	kernel_neon_end();

	return 0;
}
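
/*
 * Common ECB helper: except for the final step of the walk, the block count
 * is rounded down to a whole number of walk strides (eight blocks) before
 * calling into the NEON code.
 */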

static int __ecb_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		kernel_neon_begin();
		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
		   ctx->rounds, blocks);
		kernel_neon_end();
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}

	return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_encrypt);
}

static int ecb_decrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_decrypt);
}

static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = aes_expandkey(&ctx->fallback, in_key, key_len);
	if (err)
		return err;

	ctx->key.rounds = 6 + key_len / 4;

	kernel_neon_begin();
	aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
	kernel_neon_end();

	return 0;
}
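
/*
 * CBC encryption is inherently sequential, so the bit-sliced NEON code is of
 * no use here; each block is chained and encrypted with the scalar
 * __aes_arm_encrypt() routine instead.
 */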

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		u8 *prev = walk.iv;

		do {
			crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
			__aes_arm_encrypt(ctx->fallback.key_enc,
					  ctx->key.rounds, dst, dst);
			prev = dst;
			src += AES_BLOCK_SIZE;
			dst += AES_BLOCK_SIZE;
			nbytes -= AES_BLOCK_SIZE;
		} while (nbytes >= AES_BLOCK_SIZE);
		memcpy(walk.iv, prev, AES_BLOCK_SIZE);
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
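
/*
 * CBC decryption can be parallelised, so bulk decryption does go through the
 * bit-sliced NEON code, up to eight blocks at a time.
 */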

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		kernel_neon_begin();
		aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				  ctx->key.rk, ctx->key.rounds, blocks,
				  walk.iv);
		kernel_neon_end();
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}

	return err;
}
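
/*
 * CTR mode: full blocks are handled by the NEON code; a final partial block
 * is staged at the end of a stack buffer before and after the call so that
 * only the requested number of bytes is read and written.
 */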

static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	u8 buf[AES_BLOCK_SIZE];
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int bytes = walk.nbytes;

		if (unlikely(bytes < AES_BLOCK_SIZE))
			src = dst = memcpy(buf + sizeof(buf) - bytes,
					   src, bytes);
		else if (walk.nbytes < walk.total)
			bytes &= ~(8 * AES_BLOCK_SIZE - 1);

		kernel_neon_begin();
		aesbs_ctr_encrypt(dst, src, ctx->rk, ctx->rounds, bytes, walk.iv);
		kernel_neon_end();

		if (unlikely(bytes < AES_BLOCK_SIZE))
			memcpy(walk.dst.virt.addr,
			       buf + sizeof(buf) - bytes, bytes);

		err = skcipher_walk_done(&walk, walk.nbytes - bytes);
	}

	return err;
}
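
/*
 * XTS keys are twice the AES key size: the first half is used for the data,
 * the second half is expanded separately into tweak_key and used to encrypt
 * the initial tweak.
 */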

static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, in_key, key_len);
	if (err)
		return err;

	key_len /= 2;
	err = aes_expandkey(&ctx->fallback, in_key, key_len);
	if (err)
		return err;
	err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len);
	if (err)
		return err;

	return aesbs_setkey(tfm, in_key, key_len);
}
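
/*
 * Common XTS helper: the bulk of the data goes through the NEON code, while
 * a request that is not a whole number of blocks is split off into a
 * subrequest and finished with ciphertext stealing using the scalar cipher.
 */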

static int __xts_crypt(struct skcipher_request *req, bool encrypt,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[], int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	const int rounds = ctx->key.rounds;
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct skcipher_request subreq;
	u8 buf[2 * AES_BLOCK_SIZE];
	struct skcipher_walk walk;
	int err;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(tail)) {
		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   req->cryptlen - tail, req->iv);
		req = &subreq;
	}

	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	__aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv);

	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
		int reorder_last_tweak = !encrypt && tail > 0;

		if (walk.nbytes < walk.total) {
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);
			reorder_last_tweak = 0;
		}

		kernel_neon_begin();
		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
		   rounds, blocks, walk.iv, reorder_last_tweak);
		kernel_neon_end();
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}

	if (err || likely(!tail))
		return err;

	/* handle ciphertext stealing */
	scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	memcpy(buf + AES_BLOCK_SIZE, buf, tail);
	scatterwalk_map_and_copy(buf, req->src, req->cryptlen, tail, 0);

	crypto_xor(buf, req->iv, AES_BLOCK_SIZE);

	if (encrypt)
		__aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf);
	else
		__aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf);

	crypto_xor(buf, req->iv, AES_BLOCK_SIZE);

	scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE + tail, 1);
	return 0;
}

static int xts_encrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, true, aesbs_xts_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, false, aesbs_xts_decrypt);
}
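
/*
 * CTR mode is symmetric, so the same handler is used for both .encrypt and
 * .decrypt of the ctr(aes) algorithm.
 */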

static struct skcipher_alg aes_algs[] = { {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
}, {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_cbc_setkey,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
}, {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ctr_encrypt,
	.decrypt		= ctr_encrypt,
}, {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "xts-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_xts_setkey,
	.encrypt		= xts_encrypt,
	.decrypt		= xts_decrypt,
} };

static void aes_exit(void)
{
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	if (!(elf_hwcap & HWCAP_NEON))
		return -ENODEV;

	return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

module_init(aes_init);
module_exit(aes_exit);