crypto: acomp - Move stream management into scomp layer

Rather than allocating the stream memory in the request object,
move it into a per-cpu buffer managed by scomp.  This takes the
stress off the user from having to manage large request objects
and setting up their own per-cpu buffers in order to do so.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Herbert Xu 2025-03-09 10:43:17 +08:00
parent 0af7304c06
commit 3d72ad46a2
6 changed files with 84 additions and 93 deletions

View file

@@ -123,36 +123,6 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
} }
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
/*
 * acomp_request_alloc() - allocate an asynchronous (de)compression request.
 *
 * Allocates the base request and, when the transform is really a
 * synchronous scomp algorithm wrapped in the acomp interface
 * (cra_type != &crypto_acomp_type), attaches a per-request scomp
 * context via crypto_acomp_scomp_alloc_ctx().
 *
 * Returns the request, or NULL on allocation failure.
 */
struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
struct acomp_req *req;
req = __acomp_request_alloc(acomp);
/* scomp-backed tfm: also set up the per-request scomp ctx. */
if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
return crypto_acomp_scomp_alloc_ctx(req);
return req;
}
EXPORT_SYMBOL_GPL(acomp_request_alloc);
/*
 * acomp_request_free() - release an acomp request.
 *
 * Frees the scomp per-request context when the underlying algorithm is
 * an scomp wrapped as acomp, releases any destination SG list the
 * implementation allocated on the caller's behalf, then zeroizes and
 * frees the request itself.
 */
void acomp_request_free(struct acomp_req *req)
{
struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
/* scomp-backed tfm: release the per-request scomp ctx. */
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
crypto_acomp_scomp_free_ctx(req);
/* dst was allocated by the implementation; give it back here. */
if (req->base.flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
acomp->dst_free(req->dst);
req->dst = NULL;
}
__acomp_request_free(req);
}
EXPORT_SYMBOL_GPL(acomp_request_free);
void comp_prepare_alg(struct comp_alg_common *alg) void comp_prepare_alg(struct comp_alg_common *alg)
{ {
struct crypto_alg *base = &alg->base; struct crypto_alg *base = &alg->base;

View file

@@ -15,8 +15,6 @@ struct acomp_req;
struct comp_alg_common; struct comp_alg_common;
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
void comp_prepare_alg(struct comp_alg_common *alg); void comp_prepare_alg(struct comp_alg_common *alg);

View file

@@ -98,13 +98,62 @@ error:
return -ENOMEM; return -ENOMEM;
} }
/*
 * scomp_free_streams() - free the per-cpu stream contexts of @alg.
 *
 * scomp_alloc_streams() fills the zero-initialised per-cpu array in cpu
 * order, so the first entry with a NULL ctx marks where allocation
 * stopped; entries after it were never initialised.
 */
static void scomp_free_streams(struct scomp_alg *alg)
{
	struct crypto_acomp_stream __percpu *stream = alg->stream;
	int i;

	for_each_possible_cpu(i) {
		struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);

		if (!ps->ctx)
			break;

		/*
		 * Free the algorithm context itself, not the per-cpu
		 * slot holding it: passing ps would hand percpu memory
		 * to free_ctx() (wild free).
		 */
		alg->free_ctx(ps->ctx);
	}

	free_percpu(stream);
}
static int scomp_alloc_streams(struct scomp_alg *alg)
{
struct crypto_acomp_stream __percpu *stream;
int i;
stream = alloc_percpu(struct crypto_acomp_stream);
if (!stream)
return -ENOMEM;
for_each_possible_cpu(i) {
struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
ps->ctx = alg->alloc_ctx();
if (IS_ERR(ps->ctx)) {
scomp_free_streams(alg);
return PTR_ERR(ps->ctx);
}
spin_lock_init(&ps->lock);
}
alg->stream = stream;
return 0;
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{ {
struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
int ret = 0; int ret = 0;
mutex_lock(&scomp_lock); mutex_lock(&scomp_lock);
if (!alg->stream) {
ret = scomp_alloc_streams(alg);
if (ret)
goto unlock;
}
if (!scomp_scratch_users++) if (!scomp_scratch_users++)
ret = crypto_scomp_alloc_scratches(); ret = crypto_scomp_alloc_scratches();
unlock:
mutex_unlock(&scomp_lock); mutex_unlock(&scomp_lock);
return ret; return ret;
@@ -115,7 +164,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
void **tfm_ctx = acomp_tfm_ctx(tfm); void **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx; struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req); struct crypto_acomp_stream *stream;
struct scomp_scratch *scratch; struct scomp_scratch *scratch;
void *src, *dst; void *src, *dst;
unsigned int dlen; unsigned int dlen;
@@ -148,12 +197,15 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
else else
dst = scratch->dst; dst = scratch->dst;
stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
spin_lock(&stream->lock);
if (dir) if (dir)
ret = crypto_scomp_compress(scomp, src, req->slen, ret = crypto_scomp_compress(scomp, src, req->slen,
dst, &req->dlen, *ctx); dst, &req->dlen, stream->ctx);
else else
ret = crypto_scomp_decompress(scomp, src, req->slen, ret = crypto_scomp_decompress(scomp, src, req->slen,
dst, &req->dlen, *ctx); dst, &req->dlen, stream->ctx);
spin_unlock(&stream->lock);
if (!ret) { if (!ret) {
if (!req->dst) { if (!req->dst) {
req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL); req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
@@ -226,45 +278,19 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
crt->compress = scomp_acomp_compress; crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress; crt->decompress = scomp_acomp_decompress;
crt->dst_free = sgl_free; crt->dst_free = sgl_free;
crt->reqsize = sizeof(void *);
return 0; return 0;
} }
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req) static void crypto_scomp_destroy(struct crypto_alg *alg)
{ {
struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); scomp_free_streams(__crypto_scomp_alg(alg));
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void *ctx;
ctx = crypto_scomp_alloc_ctx(scomp);
if (IS_ERR(ctx)) {
kfree(req);
return NULL;
}
*req->__ctx = ctx;
return req;
}
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void *ctx = *req->__ctx;
if (ctx)
crypto_scomp_free_ctx(scomp, ctx);
} }
static const struct crypto_type crypto_scomp_type = { static const struct crypto_type crypto_scomp_type = {
.extsize = crypto_alg_extsize, .extsize = crypto_alg_extsize,
.init_tfm = crypto_scomp_init_tfm, .init_tfm = crypto_scomp_init_tfm,
.destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
.show = crypto_scomp_show, .show = crypto_scomp_show,
#endif #endif

View file

@@ -10,8 +10,12 @@
#define _CRYPTO_ACOMP_H #define _CRYPTO_ACOMP_H
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h> #include <linux/container_of.h>
#include <linux/crypto.h> #include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 #define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
#define CRYPTO_ACOMP_DST_MAX 131072 #define CRYPTO_ACOMP_DST_MAX 131072
@@ -54,8 +58,14 @@ struct crypto_acomp {
struct crypto_tfm base; struct crypto_tfm base;
}; };
/*
 * struct crypto_acomp_stream - per-cpu (de)compression stream state
 * @lock: serialises use of @ctx on this cpu
 * @ctx: algorithm-private context (obtained from the scomp alloc_ctx hook)
 */
struct crypto_acomp_stream {
spinlock_t lock;
void *ctx;
};
#define COMP_ALG_COMMON { \ #define COMP_ALG_COMMON { \
struct crypto_alg base; \ struct crypto_alg base; \
struct crypto_acomp_stream __percpu *stream; \
} }
struct comp_alg_common COMP_ALG_COMMON; struct comp_alg_common COMP_ALG_COMMON;
@@ -173,7 +183,16 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
* *
* Return: allocated handle in case of success or NULL in case of an error * Return: allocated handle in case of success or NULL in case of an error
*/ */
struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); static inline struct acomp_req *acomp_request_alloc_noprof(struct crypto_acomp *tfm)
{
struct acomp_req *req;
req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
if (likely(req))
acomp_request_set_tfm(req, tfm);
return req;
}
#define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))
/** /**
* acomp_request_free() -- zeroize and free asynchronous (de)compression * acomp_request_free() -- zeroize and free asynchronous (de)compression
@@ -182,7 +201,10 @@ struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
* *
* @req: request to free * @req: request to free
*/ */
void acomp_request_free(struct acomp_req *req); static inline void acomp_request_free(struct acomp_req *req)
{
kfree_sensitive(req);
}
/** /**
* acomp_request_set_callback() -- Sets an asynchronous callback * acomp_request_set_callback() -- Sets an asynchronous callback

View file

@@ -32,6 +32,7 @@
* *
* @reqsize: Context size for (de)compression requests * @reqsize: Context size for (de)compression requests
* @base: Common crypto API algorithm data structure * @base: Common crypto API algorithm data structure
* @stream: Per-cpu memory for algorithm
* @calg: Common algorithm data structure shared with scomp * @calg: Common algorithm data structure shared with scomp
*/ */
struct acomp_alg { struct acomp_alg {
@@ -68,22 +69,6 @@ static inline void acomp_request_complete(struct acomp_req *req,
crypto_request_complete(&req->base, err); crypto_request_complete(&req->base, err);
} }
/*
 * __acomp_request_alloc_noprof() - allocate a request for @tfm.
 *
 * The allocation covers the request header plus the transform's
 * per-request context (crypto_acomp_reqsize()).  Returns NULL on
 * allocation failure.
 */
static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm)
{
struct acomp_req *req;
req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
if (likely(req))
acomp_request_set_tfm(req, tfm);
return req;
}
/* Memory-allocation-profiling wrapper; callers use this, not _noprof. */
#define __acomp_request_alloc(...) alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__))
/* Zeroize (the request may hold sensitive data) and free @req. */
static inline void __acomp_request_free(struct acomp_req *req)
{
kfree_sensitive(req);
}
/** /**
* crypto_register_acomp() -- Register asynchronous compression algorithm * crypto_register_acomp() -- Register asynchronous compression algorithm
* *

View file

@@ -28,6 +28,7 @@ struct crypto_scomp {
* @compress: Function performs a compress operation * @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation * @decompress: Function performs a de-compress operation
* @base: Common crypto API algorithm data structure * @base: Common crypto API algorithm data structure
* @stream: Per-cpu memory for algorithm
* @calg: Common algorithm data structure shared with acomp * @calg: Common algorithm data structure shared with acomp
*/ */
struct scomp_alg { struct scomp_alg {
@@ -71,17 +72,6 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
} }
/* Allocate an algorithm-private context via the scomp alg's hook. */
static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
return crypto_scomp_alg(tfm)->alloc_ctx();
}
/* Release a context obtained from crypto_scomp_alloc_ctx(). */
static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
void *ctx)
{
return crypto_scomp_alg(tfm)->free_ctx(ctx);
}
static inline int crypto_scomp_compress(struct crypto_scomp *tfm, static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen, const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx) u8 *dst, unsigned int *dlen, void *ctx)