Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Merge crypto tree to pick up scompress and ahash fixes.  The
scompress fix becomes mostly unnecessary as the bugs no longer
exist with the new acompress code.  However, keep the NULL assignment
in crypto_acomp_free_streams so that, if the user decides to call
crypto_acomp_alloc_streams again, it will still work.
commit 51a7c741f7
Author: Herbert Xu
Date:   2025-04-12 09:48:09 +08:00

4 changed files with 8 additions and 77 deletions

@@ -483,6 +483,7 @@ void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
 	void (*free_ctx)(void *);
 	int i;
 
+	s->streams = NULL;
 	if (!streams)
 		return;

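The hunk above keeps the s->streams NULL assignment noted in the merge
description. A hedged, userspace-style sketch of the same pattern (toy
types and plain malloc/free standing in for the kernel's per-CPU stream
machinery) shows why clearing the pointer before the early return matters:

#include <stdlib.h>

/* Toy stand-in for struct crypto_acomp_streams; the real kernel type
 * holds a __percpu stream array plus alloc_ctx/free_ctx callbacks. */
struct toy_streams {
	void *streams;
};

/* Mirrors the shape of the fixed crypto_acomp_free_streams(): clear
 * s->streams before the early return, so a repeated free is a no-op
 * and a later allocation starts from a clean NULL state. */
static void toy_free_streams(struct toy_streams *s)
{
	void *streams = s->streams;

	s->streams = NULL;	/* the assignment kept by this merge */
	if (!streams)
		return;		/* never allocated, or already freed */
	free(streams);
}

/* Re-allocation works after a free precisely because the pointer was
 * reset rather than left dangling. */
static int toy_alloc_streams(struct toy_streams *s)
{
	s->streams = malloc(64);
	return s->streams ? 0 : -1;
}
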
@@ -315,16 +315,7 @@ EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
 static bool ahash_request_hasvirt(struct ahash_request *req)
 {
-	struct ahash_request *r2;
-
-	if (ahash_request_isvirt(req))
-		return true;
-
-	list_for_each_entry(r2, &req->base.list, base.list)
-		if (ahash_request_isvirt(r2))
-			return true;
-
-	return false;
+	return ahash_request_isvirt(req);
 }
 
 static int ahash_reqchain_virt(struct ahash_save_req_state *state,
@@ -472,7 +463,6 @@ static int ahash_do_req_chain(struct ahash_request *req,
 	bool update = op == crypto_ahash_alg(tfm)->update;
 	struct ahash_save_req_state *state;
 	struct ahash_save_req_state state0;
-	struct ahash_request *r2;
 	u8 *page = NULL;
 	int err;
 
@@ -509,7 +499,6 @@ static int ahash_do_req_chain(struct ahash_request *req,
 	state->offset = 0;
 	state->nbytes = 0;
 	INIT_LIST_HEAD(&state->head);
-	list_splice_init(&req->base.list, &state->head);
 
 	if (page)
 		sg_init_one(&state->sg, page, PAGE_SIZE);
@@ -540,9 +529,6 @@ out_free_page:
 
 out_set_chain:
 	req->base.err = err;
-	list_for_each_entry(r2, &req->base.list, base.list)
-		r2->base.err = err;
-
 	return err;
 }
 
@@ -551,19 +537,10 @@ int crypto_ahash_init(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (likely(tfm->using_shash)) {
-		struct ahash_request *r2;
 		int err;
 
 		err = crypto_shash_init(prepare_shash_desc(req, tfm));
 		req->base.err = err;
-
-		list_for_each_entry(r2, &req->base.list, base.list) {
-			struct shash_desc *desc;
-
-			desc = prepare_shash_desc(r2, tfm);
-			r2->base.err = crypto_shash_init(desc);
-		}
-
 		return err;
 	}
 
@@ -620,19 +597,10 @@ int crypto_ahash_update(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (likely(tfm->using_shash)) {
-		struct ahash_request *r2;
 		int err;
 
 		err = shash_ahash_update(req, ahash_request_ctx(req));
 		req->base.err = err;
-
-		list_for_each_entry(r2, &req->base.list, base.list) {
-			struct shash_desc *desc;
-
-			desc = ahash_request_ctx(r2);
-			r2->base.err = shash_ahash_update(r2, desc);
-		}
-
 		return err;
 	}
 
@@ -645,19 +613,10 @@ int crypto_ahash_final(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (likely(tfm->using_shash)) {
-		struct ahash_request *r2;
 		int err;
 
 		err = crypto_shash_final(ahash_request_ctx(req), req->result);
 		req->base.err = err;
-
-		list_for_each_entry(r2, &req->base.list, base.list) {
-			struct shash_desc *desc;
-
-			desc = ahash_request_ctx(r2);
-			r2->base.err = crypto_shash_final(desc, r2->result);
-		}
-
 		return err;
 	}
 
@@ -670,19 +629,10 @@ int crypto_ahash_finup(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (likely(tfm->using_shash)) {
-		struct ahash_request *r2;
 		int err;
 
 		err = shash_ahash_finup(req, ahash_request_ctx(req));
 		req->base.err = err;
-
-		list_for_each_entry(r2, &req->base.list, base.list) {
-			struct shash_desc *desc;
-
-			desc = ahash_request_ctx(r2);
-			r2->base.err = shash_ahash_finup(r2, desc);
-		}
-
 		return err;
 	}
 
@@ -757,19 +707,10 @@ int crypto_ahash_digest(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 
 	if (likely(tfm->using_shash)) {
-		struct ahash_request *r2;
 		int err;
 
 		err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
 		req->base.err = err;
-
-		list_for_each_entry(r2, &req->base.list, base.list) {
-			struct shash_desc *desc;
-
-			desc = prepare_shash_desc(r2, tfm);
-			r2->base.err = shash_ahash_digest(r2, desc);
-		}
-
 		return err;
 	}
 
@@ -1133,20 +1074,5 @@ int ahash_register_instance(struct crypto_template *tmpl,
 }
 EXPORT_SYMBOL_GPL(ahash_register_instance);
 
-void ahash_request_free(struct ahash_request *req)
-{
-	struct ahash_request *tmp;
-	struct ahash_request *r2;
-
-	if (unlikely(!req))
-		return;
-
-	list_for_each_entry_safe(r2, tmp, &req->base.list, base.list)
-		kfree_sensitive(r2);
-
-	kfree_sensitive(req);
-}
-EXPORT_SYMBOL_GPL(ahash_request_free);
-
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");

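With request chaining removed above, each ahash_request is prepared,
submitted, and freed on its own. For orientation, a hedged sketch of a
one-shot digest using the long-standing ahash API (the "sha256" algorithm
name is illustrative; the source buffer is assumed not to live on the
stack, since it is mapped through a scatterlist):

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hedged sketch, not part of this commit: one self-contained digest.
 * ahash_request_free() here is the inline kfree_sensitive() wrapper
 * that this merge moves into the header. */
static int sha256_oneshot(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* One request, one completion; no sibling list to walk. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}
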
@@ -10,6 +10,7 @@
 
 #include <linux/atomic.h>
 #include <linux/crypto.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
 /* Set this bit for virtual address instead of SG list. */
@@ -581,7 +582,10 @@ static inline struct ahash_request *ahash_request_alloc_noprof(
  * ahash_request_free() - zeroize and free the request data structure
  * @req: request data structure cipher handle to be freed
  */
-void ahash_request_free(struct ahash_request *req);
+static inline void ahash_request_free(struct ahash_request *req)
+{
+	kfree_sensitive(req);
+}
 
 static inline struct ahash_request *ahash_request_cast(
 	struct crypto_async_request *req)

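One behavioural note on the hunk above: the deleted out-of-line
ahash_request_free() checked for a NULL request explicitly, while the
inline replacement relies on kfree_sensitive() being a no-op on NULL
(as kfree() is), so the semantics are unchanged. A small hedged
illustration:

#include <crypto/hash.h>

/* Illustrative only: ahash_request_alloc() returns NULL on failure,
 * and passing that NULL straight to ahash_request_free() stays safe
 * because kfree_sensitive(NULL) does nothing. */
static void alloc_then_free(struct crypto_ahash *tfm)
{
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);

	ahash_request_free(req);	/* no-op when req == NULL */
}
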
@@ -249,7 +249,7 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
 
 static inline bool ahash_request_chained(struct ahash_request *req)
 {
-	return crypto_request_chained(&req->base);
+	return false;
 }
 
 static inline bool ahash_request_isvirt(struct ahash_request *req)