crypto: skcipher - Make skcipher_walk src.virt.addr const
Mark the src.virt.addr field in struct skcipher_walk as a pointer to const data. This guarantees that users do not modify the source data directly; any modification must go through dst.virt.addr instead, which ensures that flushing is done when necessary.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent db873be6f0
commit 37d451809f

13 changed files with 33 additions and 32 deletions
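Outside the diff itself, the effect of the change can be shown with a small stand-alone sketch. The struct and function below are simplified stand-ins written for illustration; they are not the kernel's struct skcipher_walk or any real driver code, but they show why a const-qualified source address forces every write to go through the destination pointer.

#include <stdint.h>

/*
 * Simplified stand-in for struct skcipher_walk (illustration only):
 * the source address is now const, the destination stays writable.
 */
struct walk_sketch {
        struct { const void *addr; } src;   /* read-only view of the input */
        struct { void *addr; } dst;         /* all output goes through here */
        unsigned int nbytes;
};

/* XOR a keystream into the data: read via src, write via dst. */
static void xor_blocks(struct walk_sketch *w, const uint8_t *keystream)
{
        const uint8_t *src = w->src.addr;   /* must be a pointer to const now */
        uint8_t *dst = w->dst.addr;
        unsigned int i;

        for (i = 0; i < w->nbytes; i++)
                dst[i] = src[i] ^ keystream[i];
        /* An assignment such as src[i] = 0 would no longer compile,
         * which is exactly the guarantee the commit is after. */
}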
@@ -399,9 +399,9 @@ static int ctr_encrypt(struct skcipher_request *req)
 	}
 	if (walk.nbytes) {
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+		const u8 *tsrc = walk.src.virt.addr;
 		unsigned int nbytes = walk.nbytes;
 		u8 *tdst = walk.dst.virt.addr;
-		u8 *tsrc = walk.src.virt.addr;
 
 		/*
 		 * Tell aes_ctr_encrypt() to process a tail block.
@@ -287,7 +287,8 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
 	struct skcipher_walk walk;
 	int nbytes, err;
 	int first = 1;
-	u8 *out, *in;
+	const u8 *in;
+	u8 *out;
 
 	if (req->cryptlen < AES_BLOCK_SIZE)
 		return -EINVAL;
@@ -35,9 +35,9 @@ MODULE_ALIAS_CRYPTO("aes");
 asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
 				       void *key);
 asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
-asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
+asmlinkage void aes_p10_gcm_encrypt(const u8 *in, u8 *out, size_t len,
 				    void *rkey, u8 *iv, void *Xi);
-asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
+asmlinkage void aes_p10_gcm_decrypt(const u8 *in, u8 *out, size_t len,
 				    void *rkey, u8 *iv, void *Xi);
 asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
 asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
@@ -261,7 +261,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
 		return ret;
 
 	while ((nbytes = walk.nbytes) > 0 && ret == 0) {
-		u8 *src = walk.src.virt.addr;
+		const u8 *src = walk.src.virt.addr;
 		u8 *dst = walk.dst.virt.addr;
 		u8 buf[AES_BLOCK_SIZE];
 
@@ -69,9 +69,9 @@ static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
 static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
 			     struct skcipher_walk *walk)
 {
+	const u8 *src = walk->src.virt.addr;
 	u8 *ctrblk = walk->iv;
 	u8 keystream[AES_BLOCK_SIZE];
-	u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
 	unsigned int nbytes = walk->nbytes;
 
@@ -321,7 +321,7 @@ static void ctr_crypt_final(const struct crypto_sparc64_aes_ctx *ctx,
 {
 	u8 *ctrblk = walk->iv;
 	u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
-	u8 *src = walk->src.virt.addr;
+	const u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
 	unsigned int nbytes = walk->nbytes;
 
@@ -73,7 +73,7 @@ static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
 	err = skcipher_walk_virt(&walk, req, false);
 
 	while ((nbytes = walk.nbytes)) {
-		u8 *wsrc = walk.src.virt.addr;
+		const u8 *wsrc = walk.src.virt.addr;
 		u8 *wdst = walk.dst.virt.addr;
 
 		/* Process four block batch */
crypto/ctr.c (10 changed lines)
@@ -33,7 +33,7 @@ static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
 	u8 *ctrblk = walk->iv;
 	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
-	u8 *src = walk->src.virt.addr;
+	const u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
 	unsigned int nbytes = walk->nbytes;
 
@@ -50,7 +50,7 @@ static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
 		   crypto_cipher_alg(tfm)->cia_encrypt;
 	unsigned int bsize = crypto_cipher_blocksize(tfm);
 	u8 *ctrblk = walk->iv;
-	u8 *src = walk->src.virt.addr;
+	const u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
 	unsigned int nbytes = walk->nbytes;
 
@@ -77,20 +77,20 @@ static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
 	unsigned int bsize = crypto_cipher_blocksize(tfm);
 	unsigned long alignmask = crypto_cipher_alignmask(tfm);
 	unsigned int nbytes = walk->nbytes;
+	u8 *dst = walk->dst.virt.addr;
 	u8 *ctrblk = walk->iv;
-	u8 *src = walk->src.virt.addr;
 	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
 
 	do {
 		/* create keystream */
 		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
-		crypto_xor(src, keystream, bsize);
+		crypto_xor(dst, keystream, bsize);
 
 		/* increment counter in counterblock */
 		crypto_inc(ctrblk, bsize);
 
-		src += bsize;
+		dst += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
 	return nbytes;
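A note on the in-place helpers (crypto_ctr_crypt_inplace above, and the pcbc and xctr in-place paths further down): for an in-place walk the source and destination views alias the same buffer, so once src.virt.addr is const the only writable view left is dst.virt.addr, which is why these loops switch from src to dst. A rough stand-alone sketch of that aliasing, with hypothetical names rather than the kernel API:

#include <stdint.h>

/*
 * Illustration only: in the in-place case src and dst point at the same
 * bytes, but after the const change only the dst view may be written.
 */
static void inplace_xor_sketch(const uint8_t *src, uint8_t *dst,
                               const uint8_t *keystream, unsigned int bsize)
{
        unsigned int i;

        /* src == dst here; reads may use either view, writes only dst. */
        for (i = 0; i < bsize; i++)
                dst[i] = src[i] ^ keystream[i];
}

/* Example: encrypt one 16-byte block in place. */
static void example(uint8_t *buf, const uint8_t *keystream)
{
        inplace_xor_sketch(buf, buf, keystream, 16);
}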
@@ -167,7 +167,7 @@ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
 
 	while (w.nbytes) {
 		unsigned int avail = w.nbytes;
-		be128 *wsrc;
+		const be128 *wsrc;
 		be128 *wdst;
 
 		wsrc = w.src.virt.addr;
@@ -22,8 +22,8 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
 				       struct crypto_cipher *tfm)
 {
 	int bsize = crypto_cipher_blocksize(tfm);
+	const u8 *src = walk->src.virt.addr;
 	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
 	u8 * const iv = walk->iv;
 
@@ -45,17 +45,17 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
 {
 	int bsize = crypto_cipher_blocksize(tfm);
 	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
 	u8 * const iv = walk->iv;
 	u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
 
 	do {
-		memcpy(tmpbuf, src, bsize);
-		crypto_xor(iv, src, bsize);
-		crypto_cipher_encrypt_one(tfm, src, iv);
-		crypto_xor_cpy(iv, tmpbuf, src, bsize);
+		memcpy(tmpbuf, dst, bsize);
+		crypto_xor(iv, dst, bsize);
+		crypto_cipher_encrypt_one(tfm, dst, iv);
+		crypto_xor_cpy(iv, tmpbuf, dst, bsize);
 
-		src += bsize;
+		dst += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
 	return nbytes;
@@ -89,8 +89,8 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
 				       struct crypto_cipher *tfm)
 {
 	int bsize = crypto_cipher_blocksize(tfm);
+	const u8 *src = walk->src.virt.addr;
 	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
 	u8 *dst = walk->dst.virt.addr;
 	u8 * const iv = walk->iv;
 
@@ -112,17 +112,17 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
 {
 	int bsize = crypto_cipher_blocksize(tfm);
 	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
 	u8 * const iv = walk->iv;
 	u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
 
 	do {
-		memcpy(tmpbuf, src, bsize);
-		crypto_cipher_decrypt_one(tfm, src, src);
-		crypto_xor(src, iv, bsize);
-		crypto_xor_cpy(iv, src, tmpbuf, bsize);
+		memcpy(tmpbuf, dst, bsize);
+		crypto_cipher_decrypt_one(tfm, dst, dst);
+		crypto_xor(dst, iv, bsize);
+		crypto_xor_cpy(iv, dst, tmpbuf, bsize);
 
-		src += bsize;
+		dst += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
 	return nbytes;
@@ -78,7 +78,7 @@ static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk,
 		   crypto_cipher_alg(tfm)->cia_encrypt;
 	unsigned long alignmask = crypto_cipher_alignmask(tfm);
 	unsigned int nbytes = walk->nbytes;
-	u8 *data = walk->src.virt.addr;
+	u8 *data = walk->dst.virt.addr;
 	u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
 	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
 	__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);
@@ -99,7 +99,7 @@ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
 
 	while (w.nbytes) {
 		unsigned int avail = w.nbytes;
-		le128 *wsrc;
+		const le128 *wsrc;
 		le128 *wdst;
 
 		wsrc = w.src.virt.addr;
@@ -34,8 +34,8 @@ static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
 	err = skcipher_walk_virt(&walk, req, false);
 
 	while (walk.nbytes > 0) {
+		const u8 *src = walk.src.virt.addr;
 		u8 *dst = walk.dst.virt.addr;
-		u8 *src = walk.src.virt.addr;
 		int nbytes = walk.nbytes;
 		int tail = 0;
 
@@ -59,7 +59,7 @@ struct skcipher_walk {
 	/* Virtual address of the source. */
 	struct {
 		struct {
-			void *const addr;
+			const void *const addr;
 		} virt;
 	} src;
 