6.6-stable review patch. If anyone has any objections, please let me know.
------------------
From: Herbert Xu <herbert@gondor.apana.org.au>
[ Upstream commit 442134ab30e75b7229c4bfc1ac5641d245cffe27 ]
If an error occurs during queueing, the engine load will never be decremented. Fix this by moving the engine load adjustment into the cleanup function.
Fixes: bf8f91e71192 ("crypto: marvell - Add load balancing between engines")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/crypto/marvell/cesa/cipher.c | 4 +++-
 drivers/crypto/marvell/cesa/hash.c   | 5 +++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 3876e3ce822f..eabed9d977df 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -75,9 +75,12 @@ mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
 static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
 {
 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+	struct mv_cesa_engine *engine = creq->base.engine;
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
 		mv_cesa_skcipher_dma_cleanup(req);
+
+	atomic_sub(req->cryptlen, &engine->load);
 }
 
 static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
@@ -212,7 +215,6 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
 	struct mv_cesa_engine *engine = creq->base.engine;
 	unsigned int ivsize;
 
-	atomic_sub(skreq->cryptlen, &engine->load);
 	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 6815eddc9068..e339ce7ad533 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -110,9 +110,12 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
 static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_engine *engine = creq->base.engine;
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
 		mv_cesa_ahash_dma_cleanup(req);
+
+	atomic_sub(req->nbytes, &engine->load);
 }
 
 static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
@@ -395,8 +398,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 			}
 		}
 	}
-
-	atomic_sub(ahashreq->nbytes, &engine->load);
 }
 
 static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
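
As a review aid, here is a minimal standalone sketch of the accounting problem the commit message describes. This is not the driver code; fake_engine, fake_queue_req, fake_complete and fake_cleanup are made-up stand-ins for engine->load, the queueing path, the completion handler and the cleanup function. The old scheme only subtracts from the load in the completion handler, which never runs if queueing fails; moving the subtraction into cleanup balances the counter on both the success and the error path.

#include <stdatomic.h>
#include <stdio.h>

struct fake_engine {
	atomic_int load;		/* stands in for engine->load */
};

/* Queueing charges the load up front and may then fail. */
static int fake_queue_req(struct fake_engine *e, int cryptlen, int err)
{
	atomic_fetch_add(&e->load, cryptlen);
	return err;			/* on error the request never completes */
}

/* Old scheme: only the completion handler gives the load back. */
static void fake_complete(struct fake_engine *e, int cryptlen)
{
	atomic_fetch_sub(&e->load, cryptlen);
}

/* New scheme: cleanup runs on success and on error, so it gives the load back. */
static void fake_cleanup(struct fake_engine *e, int cryptlen)
{
	atomic_fetch_sub(&e->load, cryptlen);
}

int main(void)
{
	struct fake_engine e = { 0 };

	/* Old scheme with a queueing failure: complete() never runs. */
	if (fake_queue_req(&e, 256, -1) == 0)
		fake_complete(&e, 256);
	printf("old scheme, failed queue: load = %d (leaked)\n",
	       atomic_load(&e.load));

	/* New scheme: cleanup() runs regardless of the queueing result. */
	atomic_store(&e.load, 0);
	fake_queue_req(&e, 256, -1);
	fake_cleanup(&e, 256);
	printf("new scheme, failed queue: load = %d (balanced)\n",
	       atomic_load(&e.load));
	return 0;
}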