crypto: engine - remove {prepare,unprepare}_crypt_hardware callbacks

The {prepare,unprepare}_crypt_hardware callbacks were added back in 2016
by commit 735d37b542 ("crypto: engine - Introduce the block request
crypto engine framework"), but they were never implemented by any driver.
Remove them as they are unused.

Since the 'engine->idling' and 'was_busy' flags are no longer needed,
remove them as well.

Signed-off-by: Ovidiu Panait <ovidiu.panait.oss@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Ovidiu Panait 2025-07-11 21:29:32 +03:00 committed by Herbert Xu
parent c470ffa6f4
commit 5eb32430df
3 changed files with 1 addition and 46 deletions

View file

@@ -36,12 +36,6 @@ engine using ``crypto_engine_stop()`` and destroy the engine with
 Before transferring any request, you have to fill the context enginectx by
 providing functions for the following:
 
-* ``prepare_crypt_hardware``: Called once before any prepare functions are
-  called.
-
-* ``unprepare_crypt_hardware``: Called once after all unprepare functions have
-  been called.
-
 * ``prepare_cipher_request``/``prepare_hash_request``: Called before each
   corresponding request is performed. If some processing or other preparatory
   work is required, do it here.

View file

@@ -74,7 +74,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	struct crypto_engine_alg *alg;
 	struct crypto_engine_op *op;
 	unsigned long flags;
-	bool was_busy = false;
 	int ret;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
@@ -83,12 +82,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	if (!engine->retry_support && engine->cur_req)
 		goto out;
 
-	/* If another context is idling then defer */
-	if (engine->idling) {
-		kthread_queue_work(engine->kworker, &engine->pump_requests);
-		goto out;
-	}
-
 	/* Check if the engine queue is idle */
 	if (!crypto_queue_len(&engine->queue) || !engine->running) {
 		if (!engine->busy)
@@ -102,15 +95,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		}
 
 		engine->busy = false;
-		engine->idling = true;
-		spin_unlock_irqrestore(&engine->queue_lock, flags);
-
-		if (engine->unprepare_crypt_hardware &&
-		    engine->unprepare_crypt_hardware(engine))
-			dev_err(engine->dev, "failed to unprepare crypt hardware\n");
-
-		spin_lock_irqsave(&engine->queue_lock, flags);
-		engine->idling = false;
 		goto out;
 	}
 
@@ -129,22 +113,11 @@ start_request:
 	if (!engine->retry_support)
 		engine->cur_req = async_req;
 
-	if (engine->busy)
-		was_busy = true;
-	else
+	if (!engine->busy)
 		engine->busy = true;
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	/* Until here we get the request need to be encrypted successfully */
-	if (!was_busy && engine->prepare_crypt_hardware) {
-		ret = engine->prepare_crypt_hardware(engine);
-		if (ret) {
-			dev_err(engine->dev, "failed to prepare crypt hardware\n");
-			goto req_err_1;
-		}
-	}
-
 	alg = container_of(async_req->tfm->__crt_alg,
 			   struct crypto_engine_alg, base);
 	op = &alg->op;
@@ -474,7 +447,6 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
 	engine->rt = rt;
 	engine->running = false;
 	engine->busy = false;
-	engine->idling = false;
 	engine->retry_support = retry_support;
 	engine->priv_data = dev;

View file

@@ -21,7 +21,6 @@ struct device;
 /*
  * struct crypto_engine - crypto hardware engine
  * @name: the engine name
- * @idling: the engine is entering idle state
  * @busy: request pump is busy
  * @running: the engine is on working
  * @retry_support: indication that the hardware allows re-execution
@@ -31,12 +30,6 @@ struct device;
 * @list: link with the global crypto engine list
 * @queue_lock: spinlock to synchronise access to request queue
 * @queue: the crypto queue of the engine
-* @prepare_crypt_hardware: a request will soon arrive from the queue
-* so the subsystem requests the driver to prepare the hardware
-* by issuing this call
-* @unprepare_crypt_hardware: there are currently no more requests on the
-* queue so the subsystem notifies the driver that it may relax the
-* hardware by issuing this call
 * @kworker: kthread worker struct for request pump
 * @pump_requests: work struct for scheduling work to the request pump
 * @priv_data: the engine private data
@@ -44,7 +37,6 @@ struct device;
  */
 struct crypto_engine {
 	char name[ENGINE_NAME_LEN];
-	bool idling;
 	bool busy;
 	bool running;
@@ -56,9 +48,6 @@ struct crypto_engine {
 	struct crypto_queue queue;
 	struct device *dev;
 
-	int (*prepare_crypt_hardware)(struct crypto_engine *engine);
-	int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
-
 	struct kthread_worker *kworker;
 	struct kthread_work pump_requests;