crypto: ahash - Remove request chaining
Request chaining requires the user to do too much bookkeeping. Remove it from ahash.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 69e5a1228d
commit 5bb61dc76d
5 changed files with 32 additions and 174 deletions
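For context, the interface this patch deletes is the chaining API visible in the header hunks below (ahash_request_chain(), ahash_request_err(), crypto_request_chain()). The fragment that follows is an illustrative sketch only, not part of the patch: the helper name and request variables are invented, and both requests are assumed to have been allocated and set up beforehand. It shows roughly how a caller batched two requests under the old scheme, and where the per-request bookkeeping landed on the user.

#include <crypto/hash.h>

/*
 * Illustrative sketch (not from the kernel tree): batching two prepared
 * requests with the chaining helpers removed by this patch.
 */
static int chained_digest_example(struct ahash_request *r0,
				  struct ahash_request *r1)
{
	int err;

	/* r1 rides on r0; only the head request is ever submitted. */
	ahash_request_chain(r1, r0);

	err = crypto_ahash_digest(r0);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;	/* the head's completion covers the chain */

	/* The caller must collect each request's status separately. */
	err = ahash_request_err(r0);
	if (!err)
		err = ahash_request_err(r1);
	return err;
}

After this patch each request is submitted on its own; a sketch of the remaining calling convention follows the diff.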
crypto/ahash.c (169 lines changed)
@@ -43,10 +43,7 @@ struct crypto_hash_walk {
};

struct ahash_save_req_state {
	struct list_head head;
	struct ahash_request *req0;
	struct ahash_request *cur;
	int (*op)(struct ahash_request *req);
	crypto_completion_t compl;
	void *data;
	struct scatterlist sg;

@@ -54,9 +51,9 @@ struct ahash_save_req_state {
	u8 *page;
	unsigned int offset;
	unsigned int nbytes;
	bool update;
};

static void ahash_reqchain_done(void *data, int err);
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
static void ahash_restore_req(struct ahash_request *req);
static void ahash_def_finup_done1(void *data, int err);

@@ -313,21 +310,17 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static bool ahash_request_hasvirt(struct ahash_request *req)
{
	return ahash_request_isvirt(req);
}

static int ahash_reqchain_virt(struct ahash_save_req_state *state,
			       int err, u32 mask)
{
	struct ahash_request *req = state->cur;
	struct ahash_request *req = state->req0;
	struct crypto_ahash *tfm;

	tfm = crypto_ahash_reqtfm(req);

	for (;;) {
		unsigned len = state->nbytes;

		req->base.err = err;

		if (!state->offset)
			break;

@@ -346,10 +339,9 @@ static int ahash_reqchain_virt(struct ahash_save_req_state *state,
		state->offset += len;
		req->nbytes = len;

		err = state->op(req);
		err = crypto_ahash_alg(tfm)->update(req);
		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head) ||
			    state->offset < state->nbytes)
			if (state->offset < state->nbytes)
				err = -EBUSY;
			break;
		}

@@ -365,64 +357,12 @@ static int ahash_reqchain_finish(struct ahash_request *req0,
				 struct ahash_save_req_state *state,
				 int err, u32 mask)
{
	struct ahash_request *req = state->cur;
	struct crypto_ahash *tfm;
	struct ahash_request *n;
	bool update;
	u8 *page;

	err = ahash_reqchain_virt(state, err, mask);
	if (err == -EINPROGRESS || err == -EBUSY)
		goto out;

	if (req != req0)
		list_add_tail(&req->base.list, &req0->base.list);

	tfm = crypto_ahash_reqtfm(req);
	update = state->op == crypto_ahash_alg(tfm)->update;

	list_for_each_entry_safe(req, n, &state->head, base.list) {
		list_del_init(&req->base.list);

		req->base.flags &= mask;
		req->base.complete = ahash_reqchain_done;
		req->base.data = state;
		state->cur = req;

		if (update && ahash_request_isvirt(req) && req->nbytes) {
			unsigned len = req->nbytes;
			u8 *result = req->result;

			state->src = req->svirt;
			state->nbytes = len;

			len = min(PAGE_SIZE, len);

			memcpy(state->page, req->svirt, len);
			state->offset = len;

			ahash_request_set_crypt(req, &state->sg, result, len);
		}

		err = state->op(req);

		if (err == -EINPROGRESS) {
			if (!list_empty(&state->head) ||
			    state->offset < state->nbytes)
				err = -EBUSY;
			goto out;
		}

		if (err == -EBUSY)
			goto out;

		err = ahash_reqchain_virt(state, err, mask);
		if (err == -EINPROGRESS || err == -EBUSY)
			goto out;

		list_add_tail(&req->base.list, &req0->base.list);
	}

	page = state->page;
	if (page) {
		memset(page, 0, PAGE_SIZE);

@@ -442,7 +382,7 @@ static void ahash_reqchain_done(void *data, int err)
	data = state->data;

	if (err == -EINPROGRESS) {
		if (!list_empty(&state->head) || state->offset < state->nbytes)
		if (state->offset < state->nbytes)
			return;
		goto notify;
	}

@@ -467,21 +407,14 @@ static int ahash_do_req_chain(struct ahash_request *req,
	int err;

	if (crypto_ahash_req_chain(tfm) ||
	    (!ahash_request_chained(req) &&
	     (!update || !ahash_request_isvirt(req))))
	    !update || !ahash_request_isvirt(req))
		return op(req);

	if (update && ahash_request_hasvirt(req)) {
		gfp_t gfp;
		u32 flags;

		flags = ahash_request_flags(req);
		gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
		page = (void *)__get_free_page(gfp);
	if (update && ahash_request_isvirt(req)) {
		page = (void *)__get_free_page(GFP_ATOMIC);
		err = -ENOMEM;
		if (!page)
			goto out_set_chain;
			goto out;
	}

	state = &state0;

@@ -493,12 +426,10 @@ static int ahash_do_req_chain(struct ahash_request *req,
		state = req->base.data;
	}

	state->op = op;
	state->cur = req;
	state->update = update;
	state->page = page;
	state->offset = 0;
	state->nbytes = 0;
	INIT_LIST_HEAD(&state->head);

	if (page)
		sg_init_one(&state->sg, page, PAGE_SIZE);

@@ -519,16 +450,18 @@ static int ahash_do_req_chain(struct ahash_request *req,
	}

	err = op(req);
	if (err == -EBUSY || err == -EINPROGRESS)
		return -EBUSY;
	if (err == -EINPROGRESS || err == -EBUSY) {
		if (state->offset < state->nbytes)
			err = -EBUSY;
		return err;
	}

	return ahash_reqchain_finish(req, state, err, ~0);

out_free_page:
	free_page((unsigned long)page);

out_set_chain:
	req->base.err = err;
out:
	return err;
}

@@ -536,17 +469,10 @@ int crypto_ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = crypto_shash_init(prepare_shash_desc(req, tfm));
		req->base.err = err;
		return err;
	}

	if (likely(tfm->using_shash))
		return crypto_shash_init(prepare_shash_desc(req, tfm));
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);

@@ -555,15 +481,11 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ahash_save_req_state *state;
	gfp_t gfp;
	u32 flags;

	if (!ahash_is_async(tfm))
		return 0;

	flags = ahash_request_flags(req);
	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
	state = kmalloc(sizeof(*state), gfp);
	state = kmalloc(sizeof(*state), GFP_ATOMIC);
	if (!state)
		return -ENOMEM;

@@ -596,14 +518,8 @@ int crypto_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = shash_ahash_update(req, ahash_request_ctx(req));
		req->base.err = err;
		return err;
	}

	if (likely(tfm->using_shash))
		return shash_ahash_update(req, ahash_request_ctx(req));
	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);

@@ -612,14 +528,8 @@ int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = crypto_shash_final(ahash_request_ctx(req), req->result);
		req->base.err = err;
		return err;
	}

	if (likely(tfm->using_shash))
		return crypto_shash_final(ahash_request_ctx(req), req->result);
	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

@@ -628,18 +538,11 @@ int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = shash_ahash_finup(req, ahash_request_ctx(req));
		req->base.err = err;
		return err;
	}

	if (likely(tfm->using_shash))
		return shash_ahash_finup(req, ahash_request_ctx(req));
	if (!crypto_ahash_alg(tfm)->finup ||
	    (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
	    (!crypto_ahash_req_chain(tfm) && ahash_request_isvirt(req)))
		return ahash_def_finup(req);

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

@@ -706,20 +609,12 @@ int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash)) {
		int err;

		err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
		req->base.err = err;
		return err;
	}

	if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
	if (likely(tfm->using_shash))
		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
	if (!crypto_ahash_req_chain(tfm) && ahash_request_isvirt(req))
		return ahash_def_digest(req);

	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

include/crypto/algapi.h
@@ -267,11 +267,6 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline bool crypto_request_chained(struct crypto_async_request *req)
{
	return !list_empty(&req->list);
}

static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN;

include/crypto/hash.h
@@ -630,7 +630,6 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
	flags &= ~keep;
	req->base.flags &= keep;
	req->base.flags |= flags;
	crypto_reqchain_init(&req->base);
}

/**

@@ -679,12 +678,6 @@ static inline void ahash_request_set_virt(struct ahash_request *req,
	req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}

static inline void ahash_request_chain(struct ahash_request *req,
				       struct ahash_request *head)
{
	crypto_request_chain(&req->base, &head->base);
}

/**
 * DOC: Synchronous Message Digest API
 *

@@ -986,11 +979,6 @@ static inline void shash_desc_zero(struct shash_desc *desc)
			 sizeof(*desc) + crypto_shash_descsize(desc->tfm));
}

static inline int ahash_request_err(struct ahash_request *req)
{
	return req->base.err;
}

static inline bool ahash_is_async(struct crypto_ahash *tfm)
{
	return crypto_tfm_is_async(&tfm->base);

include/crypto/internal/hash.h
@@ -247,11 +247,6 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
	return container_of(tfm, struct crypto_shash, base);
}

static inline bool ahash_request_chained(struct ahash_request *req)
{
	return false;
}

static inline bool ahash_request_isvirt(struct ahash_request *req)
{
	return req->base.flags & CRYPTO_AHASH_REQ_VIRT;

include/linux/crypto.h
@@ -14,7 +14,6 @@

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>

@@ -179,7 +178,6 @@ struct crypto_async_request {
	struct crypto_tfm *tfm;

	u32 flags;
	int err;
};

/**

@@ -473,19 +471,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
	return __alignof__(tfm->__crt_ctx);
}

static inline void crypto_reqchain_init(struct crypto_async_request *req)
{
	req->err = -EINPROGRESS;
	INIT_LIST_HEAD(&req->list);
}

static inline void crypto_request_chain(struct crypto_async_request *req,
					struct crypto_async_request *head)
{
	req->err = -EINPROGRESS;
	list_add_tail(&req->list, &head->list);
}

static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
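With chaining gone, callers fall back to the ordinary one-request-at-a-time pattern. The sketch below is likewise not part of the patch; it assumes a caller that can sleep and uses the standard crypto_wait_req() helper, with an invented function name and caller-supplied buffers.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical helper: hash one scatterlist with a single request and wait
 * for it.  -EINPROGRESS/-EBUSY from an async transform are absorbed by
 * crypto_wait_req().
 */
static int digest_one(struct crypto_ahash *tfm, struct scatterlist *sg,
		      unsigned int len, u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct ahash_request *req;
	int err;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return err;
}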