crypto: ahash - Remove request chaining

Request chaining requires the user to do too much bookkeeping.
Remove it from ahash.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Herbert Xu 2025-04-12 13:37:00 +08:00
parent 69e5a1228d
commit 5bb61dc76d
5 changed files with 32 additions and 174 deletions
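
For readers who did not follow the chaining series: request chaining let a caller link extra ahash requests onto a "head" request with ahash_request_chain(), submit the head once, and then collect each chained request's own status with ahash_request_err() (backed by the req->base.err field that is also deleted here). That per-request bookkeeping is what the commit message refers to. The sketch below is illustrative only (synchronous tfm assumed, allocation and ahash_request_set_crypt() setup omitted, helper names invented); it contrasts the removed pattern with the plain one-call-per-request pattern that remains:

/* Sketch only: request/tfm setup and error paths are elided. */
#include <crypto/hash.h>

/* Old interface (removed by this patch): chain req2 behind req1,
 * submit the head once, then check every request's own result.
 */
static int sketch_digest_chained(struct ahash_request *req1,
				 struct ahash_request *req2)
{
	int err;

	ahash_request_chain(req2, req1);	/* removed */

	err = crypto_ahash_digest(req1);	/* processes the whole chain */
	if (err)
		return err;

	/* Caller-side bookkeeping: nothing reminds you to do this. */
	err = ahash_request_err(req1);		/* removed */
	if (!err)
		err = ahash_request_err(req2);	/* removed */
	return err;
}

/* Remaining interface: one request per call, one return value per call. */
static int sketch_digest_plain(struct ahash_request *req1,
			       struct ahash_request *req2)
{
	int err;

	err = crypto_ahash_digest(req1);
	if (!err)
		err = crypto_ahash_digest(req2);
	return err;
}

With the chained form gone, ahash_do_req_chain() no longer has to track a state->head list or a state->cur cursor, which is where most of the 174 deleted lines below come from.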

@@ -43,10 +43,7 @@ struct crypto_hash_walk {
 };
 
 struct ahash_save_req_state {
-	struct list_head head;
 	struct ahash_request *req0;
-	struct ahash_request *cur;
-	int (*op)(struct ahash_request *req);
 	crypto_completion_t compl;
 	void *data;
 	struct scatterlist sg;
@@ -54,9 +51,9 @@ struct ahash_save_req_state {
 	u8 *page;
 	unsigned int offset;
 	unsigned int nbytes;
+	bool update;
 };
 
-static void ahash_reqchain_done(void *data, int err);
 static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
 static void ahash_restore_req(struct ahash_request *req);
 static void ahash_def_finup_done1(void *data, int err);
@@ -313,21 +310,17 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
-static bool ahash_request_hasvirt(struct ahash_request *req)
-{
-	return ahash_request_isvirt(req);
-}
-
 static int ahash_reqchain_virt(struct ahash_save_req_state *state,
 			       int err, u32 mask)
 {
-	struct ahash_request *req = state->cur;
+	struct ahash_request *req = state->req0;
+	struct crypto_ahash *tfm;
+
+	tfm = crypto_ahash_reqtfm(req);
 
 	for (;;) {
 		unsigned len = state->nbytes;
 
-		req->base.err = err;
-
 		if (!state->offset)
 			break;
@@ -346,10 +339,9 @@ static int ahash_reqchain_virt(struct ahash_save_req_state *state,
 		state->offset += len;
 		req->nbytes = len;
 
-		err = state->op(req);
+		err = crypto_ahash_alg(tfm)->update(req);
 		if (err == -EINPROGRESS) {
-			if (!list_empty(&state->head) ||
-			    state->offset < state->nbytes)
+			if (state->offset < state->nbytes)
 				err = -EBUSY;
 			break;
 		}
@@ -365,64 +357,12 @@ static int ahash_reqchain_finish(struct ahash_request *req0,
 				 struct ahash_save_req_state *state,
 				 int err, u32 mask)
 {
-	struct ahash_request *req = state->cur;
-	struct crypto_ahash *tfm;
-	struct ahash_request *n;
-	bool update;
 	u8 *page;
 
 	err = ahash_reqchain_virt(state, err, mask);
 	if (err == -EINPROGRESS || err == -EBUSY)
 		goto out;
 
-	if (req != req0)
-		list_add_tail(&req->base.list, &req0->base.list);
-
-	tfm = crypto_ahash_reqtfm(req);
-	update = state->op == crypto_ahash_alg(tfm)->update;
-
-	list_for_each_entry_safe(req, n, &state->head, base.list) {
-		list_del_init(&req->base.list);
-
-		req->base.flags &= mask;
-		req->base.complete = ahash_reqchain_done;
-		req->base.data = state;
-		state->cur = req;
-
-		if (update && ahash_request_isvirt(req) && req->nbytes) {
-			unsigned len = req->nbytes;
-			u8 *result = req->result;
-
-			state->src = req->svirt;
-			state->nbytes = len;
-
-			len = min(PAGE_SIZE, len);
-			memcpy(state->page, req->svirt, len);
-			state->offset = len;
-
-			ahash_request_set_crypt(req, &state->sg, result, len);
-		}
-
-		err = state->op(req);
-		if (err == -EINPROGRESS) {
-			if (!list_empty(&state->head) ||
-			    state->offset < state->nbytes)
-				err = -EBUSY;
-			goto out;
-		}
-
-		if (err == -EBUSY)
-			goto out;
-
-		err = ahash_reqchain_virt(state, err, mask);
-		if (err == -EINPROGRESS || err == -EBUSY)
-			goto out;
-
-		list_add_tail(&req->base.list, &req0->base.list);
-	}
-
 	page = state->page;
 	if (page) {
 		memset(page, 0, PAGE_SIZE);
@@ -442,7 +382,7 @@ static void ahash_reqchain_done(void *data, int err)
 	data = state->data;
 
 	if (err == -EINPROGRESS) {
-		if (!list_empty(&state->head) || state->offset < state->nbytes)
+		if (state->offset < state->nbytes)
 			return;
 		goto notify;
 	}
@@ -467,21 +407,14 @@ static int ahash_do_req_chain(struct ahash_request *req,
 	int err;
 
 	if (crypto_ahash_req_chain(tfm) ||
-	    (!ahash_request_chained(req) &&
-	     (!update || !ahash_request_isvirt(req))))
+	    !update || !ahash_request_isvirt(req))
 		return op(req);
 
-	if (update && ahash_request_hasvirt(req)) {
-		gfp_t gfp;
-		u32 flags;
-
-		flags = ahash_request_flags(req);
-		gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		      GFP_KERNEL : GFP_ATOMIC;
-		page = (void *)__get_free_page(gfp);
+	if (update && ahash_request_isvirt(req)) {
+		page = (void *)__get_free_page(GFP_ATOMIC);
 		err = -ENOMEM;
 		if (!page)
-			goto out_set_chain;
+			goto out;
 	}
 
 	state = &state0;
@@ -493,12 +426,10 @@ static int ahash_do_req_chain(struct ahash_request *req,
 		state = req->base.data;
 	}
 
-	state->op = op;
-	state->cur = req;
+	state->update = update;
 	state->page = page;
 	state->offset = 0;
 	state->nbytes = 0;
-	INIT_LIST_HEAD(&state->head);
 
 	if (page)
 		sg_init_one(&state->sg, page, PAGE_SIZE);
@@ -519,16 +450,18 @@ static int ahash_do_req_chain(struct ahash_request *req,
 	}
 
 	err = op(req);
-	if (err == -EBUSY || err == -EINPROGRESS)
-		return -EBUSY;
+	if (err == -EINPROGRESS || err == -EBUSY) {
+		if (state->offset < state->nbytes)
+			err = -EBUSY;
+		return err;
+	}
 
 	return ahash_reqchain_finish(req, state, err, ~0);
 
 out_free_page:
 	free_page((unsigned long)page);
 
-out_set_chain:
-	req->base.err = err;
+out:
 	return err;
 }
@@ -536,17 +469,10 @@ int crypto_ahash_init(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-	if (likely(tfm->using_shash)) {
-		int err;
-
-		err = crypto_shash_init(prepare_shash_desc(req, tfm));
-		req->base.err = err;
-		return err;
-	}
-
+	if (likely(tfm->using_shash))
+		return crypto_shash_init(prepare_shash_desc(req, tfm));
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		return -ENOKEY;
-
 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_init);
@@ -555,15 +481,11 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct ahash_save_req_state *state;
-	gfp_t gfp;
-	u32 flags;
 
 	if (!ahash_is_async(tfm))
 		return 0;
 
-	flags = ahash_request_flags(req);
-	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
-	state = kmalloc(sizeof(*state), gfp);
+	state = kmalloc(sizeof(*state), GFP_ATOMIC);
 	if (!state)
 		return -ENOMEM;
@@ -596,14 +518,8 @@ int crypto_ahash_update(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-	if (likely(tfm->using_shash)) {
-		int err;
-
-		err = shash_ahash_update(req, ahash_request_ctx(req));
-		req->base.err = err;
-		return err;
-	}
-
+	if (likely(tfm->using_shash))
+		return shash_ahash_update(req, ahash_request_ctx(req));
 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_update);
@@ -612,14 +528,8 @@ int crypto_ahash_final(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-	if (likely(tfm->using_shash)) {
-		int err;
-
-		err = crypto_shash_final(ahash_request_ctx(req), req->result);
-		req->base.err = err;
-		return err;
-	}
-
+	if (likely(tfm->using_shash))
+		return crypto_shash_final(ahash_request_ctx(req), req->result);
 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
@@ -628,18 +538,11 @@ int crypto_ahash_finup(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-	if (likely(tfm->using_shash)) {
-		int err;
-
-		err = shash_ahash_finup(req, ahash_request_ctx(req));
-		req->base.err = err;
-		return err;
-	}
-
+	if (likely(tfm->using_shash))
+		return shash_ahash_finup(req, ahash_request_ctx(req));
 	if (!crypto_ahash_alg(tfm)->finup ||
-	    (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
+	    (!crypto_ahash_req_chain(tfm) && ahash_request_isvirt(req)))
 		return ahash_def_finup(req);
-
 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
@@ -706,20 +609,12 @@ int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
-	if (likely(tfm->using_shash)) {
-		int err;
-
-		err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
-		req->base.err = err;
-		return err;
-	}
-
-	if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
+	if (likely(tfm->using_shash))
+		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+	if (!crypto_ahash_req_chain(tfm) && ahash_request_isvirt(req))
 		return ahash_def_digest(req);
-
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		return -ENOKEY;
-
 	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);

@@ -267,11 +267,6 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
 	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
 }
 
-static inline bool crypto_request_chained(struct crypto_async_request *req)
-{
-	return !list_empty(&req->list);
-}
-
 static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm)
 {
 	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN;

@@ -630,7 +630,6 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
 	flags &= ~keep;
 	req->base.flags &= keep;
 	req->base.flags |= flags;
-	crypto_reqchain_init(&req->base);
 }
 
 /**
@@ -679,12 +678,6 @@ static inline void ahash_request_set_virt(struct ahash_request *req,
 	req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
 }
 
-static inline void ahash_request_chain(struct ahash_request *req,
-				       struct ahash_request *head)
-{
-	crypto_request_chain(&req->base, &head->base);
-}
-
 /**
  * DOC: Synchronous Message Digest API
  *
@@ -986,11 +979,6 @@ static inline void shash_desc_zero(struct shash_desc *desc)
 		     sizeof(*desc) + crypto_shash_descsize(desc->tfm));
 }
 
-static inline int ahash_request_err(struct ahash_request *req)
-{
-	return req->base.err;
-}
-
 static inline bool ahash_is_async(struct crypto_ahash *tfm)
 {
 	return crypto_tfm_is_async(&tfm->base);

@@ -247,11 +247,6 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
 	return container_of(tfm, struct crypto_shash, base);
 }
 
-static inline bool ahash_request_chained(struct ahash_request *req)
-{
-	return false;
-}
-
 static inline bool ahash_request_isvirt(struct ahash_request *req)
 {
 	return req->base.flags & CRYPTO_AHASH_REQ_VIRT;

@@ -14,7 +14,6 @@
 #include <linux/completion.h>
 #include <linux/errno.h>
-#include <linux/list.h>
 #include <linux/refcount.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -179,7 +178,6 @@ struct crypto_async_request {
 	struct crypto_tfm *tfm;
 
 	u32 flags;
-	int err;
 };
 
 /**
/** /**
@ -473,19 +471,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
return __alignof__(tfm->__crt_ctx); return __alignof__(tfm->__crt_ctx);
} }
static inline void crypto_reqchain_init(struct crypto_async_request *req)
{
req->err = -EINPROGRESS;
INIT_LIST_HEAD(&req->list);
}
static inline void crypto_request_chain(struct crypto_async_request *req,
struct crypto_async_request *head)
{
req->err = -EINPROGRESS;
list_add_tail(&req->list, &head->list);
}
static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm) static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{ {
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;