Skip to content

Commit

Permalink
crypto: marvell/cesa - fix memory leak
Browse files Browse the repository at this point in the history
Crypto requests are not guaranteed to be finalized (->final() call),
and can be freed at any moment, without getting any notification from
the core. This can lead to memory leaks of the ->cache buffer.

Make this buffer part of the request object, and allocate an extra buffer
from the DMA cache pool when doing DMA operations.

As a side effect, this patch also fixes another bug related to cache
allocation and DMA operations. When the core allocates a new request and
imports an existing state, a cache buffer can be allocated (depending
on the state). The problem is, at that very moment, we don't know yet
whether the request will use DMA or not, and since everything is
likely to be initialized to zero, mv_cesa_ahash_alloc_cache() thinks it
should allocate a buffer for standard operation. But when
mv_cesa_ahash_free_cache() is called, req->type has been set to
CESA_DMA_REQ in the meantime, thus leading to an invalid dma_pool_free()
call (the buffer passed in argument has not been allocated from the pool).

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Reported-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
  • Loading branch information
Boris BREZILLON authored and Herbert Xu committed Mar 17, 2016
1 parent 03a6f29 commit 7850c91
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 69 deletions.
3 changes: 2 additions & 1 deletion drivers/crypto/marvell/cesa.h
Original file line number Diff line number Diff line change
Expand Up @@ -588,6 +588,7 @@ struct mv_cesa_ahash_dma_req {
struct mv_cesa_tdma_req base;
u8 *padding;
dma_addr_t padding_dma;
u8 *cache;
dma_addr_t cache_dma;
};

Expand All @@ -609,7 +610,7 @@ struct mv_cesa_ahash_req {
struct mv_cesa_ahash_std_req std;
} req;
struct mv_cesa_op_ctx op_tmpl;
u8 *cache;
u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
unsigned int cache_ptr;
u64 len;
int src_nents;
Expand Down
86 changes: 18 additions & 68 deletions drivers/crypto/marvell/hash.c
Original file line number Diff line number Diff line change
Expand Up @@ -45,69 +45,25 @@ mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
gfp_t flags)
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;

creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
&dreq->cache_dma);
if (!creq->cache)
return -ENOMEM;

return 0;
}

static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
gfp_t flags)
{
creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
if (!creq->cache)
req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
&req->cache_dma);
if (!req->cache)
return -ENOMEM;

return 0;
}

static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int ret;

if (creq->cache)
return 0;

if (creq->req.base.type == CESA_DMA_REQ)
ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
else
ret = mv_cesa_ahash_std_alloc_cache(creq, flags);

return ret;
}

static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
{
dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
creq->req.dma.cache_dma);
}

static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
{
kfree(creq->cache);
}

static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
if (!creq->cache)
if (!req->cache)
return;

if (creq->req.base.type == CESA_DMA_REQ)
mv_cesa_ahash_dma_free_cache(creq);
else
mv_cesa_ahash_std_free_cache(creq);

creq->cache = NULL;
dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
Expand Down Expand Up @@ -146,6 +102,7 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
mv_cesa_ahash_dma_free_cache(&creq->req.dma);
mv_cesa_dma_cleanup(&creq->req.dma.base);
}

Expand All @@ -161,8 +118,6 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

mv_cesa_ahash_free_cache(creq);

if (creq->req.base.type == CESA_DMA_REQ)
mv_cesa_ahash_dma_last_cleanup(req);
}
Expand Down Expand Up @@ -445,14 +400,6 @@ static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
int ret;

if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
!creq->last_req) {
ret = mv_cesa_ahash_alloc_cache(req);
if (ret)
return ret;
}

if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
*cached = true;
Expand Down Expand Up @@ -505,10 +452,17 @@ mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
gfp_t flags)
{
struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
int ret;

if (!creq->cache_ptr)
return 0;

ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
if (ret)
return ret;

memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

return mv_cesa_dma_add_data_transfer(chain,
CESA_SA_DATA_SRAM_OFFSET,
ahashdreq->cache_dma,
Expand Down Expand Up @@ -848,10 +802,6 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
if (!cache_ptr)
return 0;

ret = mv_cesa_ahash_alloc_cache(req);
if (ret)
return ret;

memcpy(creq->cache, cache, cache_ptr);
creq->cache_ptr = cache_ptr;

Expand Down

0 comments on commit 7850c91

Please sign in to comment.