crypto: inside-secure - dequeue all requests at once
This patch updates the dequeueing logic to dequeue all requests at once.
Since many requests can be in the queue, interrupt coalescing is kept
so that the ring interrupt fires at most once every EIP197_MAX_BATCH_SZ
requests.

To allow dequeueing all requests at once while keeping reasonable
interrupt coalescing settings, the result handling function was updated
to set up the threshold interrupt when needed (i.e. when more requests
than EIP197_MAX_BATCH_SZ are in the queue). While this capability is in
use, the ring is marked as busy so that the dequeue function enqueues
new requests without setting the threshold interrupt.

Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
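
Editor's note: the new safexcel_try_push_requests() helper arms the
threshold interrupt for min(queued, EIP197_MAX_BATCH_SZ) requests; the
dequeue path pushes the first batch and marks the ring busy, and the
result handler re-arms the interrupt for the leftover requests_left
until the ring goes idle. The standalone C sketch below models that
state machine outside the driver; ring_state, MAX_BATCH_SZ, the
function names and the printf stand-ins are illustrative only, not
driver code (the real fields live in priv->ring[ring] and are accessed
under egress_lock).

/* Simplified model of the batching logic in this commit. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BATCH_SZ 64	/* stands in for EIP197_MAX_BATCH_SZ */

struct ring_state {
	int requests_left;	/* requests not yet covered by an interrupt */
	bool busy;		/* a coalesced batch is already in flight */
};

/* Mirrors safexcel_try_push_requests(): arm the threshold interrupt
 * for at most MAX_BATCH_SZ requests, return how many were covered.
 */
static int try_push_requests(int reqs)
{
	int coal = reqs < MAX_BATCH_SZ ? reqs : MAX_BATCH_SZ;

	if (coal)
		printf("arm threshold interrupt for %d request(s)\n", coal);
	return coal;
}

/* Dequeue path: push the first batch directly, account for the rest. */
static void on_dequeue(struct ring_state *r, int nreq)
{
	if (!r->busy) {
		nreq -= try_push_requests(nreq);
		if (nreq)
			r->busy = true;
	}
	r->requests_left += nreq;
}

/* Result handling path: re-arm for the leftovers, or go idle. */
static void on_ring_interrupt(struct ring_state *r)
{
	int done = try_push_requests(r->requests_left);

	r->requests_left -= done;
	if (!done && !r->requests_left)
		r->busy = false;
}

int main(void)
{
	struct ring_state r = { 0, false };

	on_dequeue(&r, 100);	/* arms 64, leaves 36 for later */
	on_ring_interrupt(&r);	/* batch done: arms the remaining 36 */
	on_ring_interrupt(&r);	/* nothing left: the ring goes idle */
	return 0;
}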
Antoine Ténart authored and Herbert Xu committed Dec 22, 2017
1 parent 69ee4dd commit dc7e28a
Showing 2 changed files with 56 additions and 12 deletions.
drivers/crypto/inside-secure/safexcel.c (48 additions & 12 deletions)

@@ -422,14 +422,31 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
 	return 0;
 }
 
+/* Called with ring's lock taken */
+int safexcel_try_push_requests(struct safexcel_crypto_priv *priv, int ring,
+			       int reqs)
+{
+	int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+
+	if (!coal)
+		return 0;
+
+	/* Configure when we want an interrupt */
+	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+	       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+
+	return coal;
+}
+
 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct crypto_async_request *req, *backlog;
 	struct safexcel_context *ctx;
 	struct safexcel_request *request;
 	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
-	do {
+	while (true) {
 		spin_lock_bh(&priv->ring[ring].queue_lock);
 		backlog = crypto_get_backlog(&priv->ring[ring].queue);
 		req = crypto_dequeue_request(&priv->ring[ring].queue);
@@ -463,18 +480,24 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)

 		cdesc += commands;
 		rdesc += results;
-	} while (nreq++ < EIP197_MAX_BATCH_SZ);
+		nreq++;
+	}
 
 finalize:
 	if (!nreq)
 		return;
 
-	spin_lock_bh(&priv->ring[ring].lock);
+	spin_lock_bh(&priv->ring[ring].egress_lock);
 
-	/* Configure when we want an interrupt */
-	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
-	       EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
-	       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+	if (!priv->ring[ring].busy) {
+		nreq -= safexcel_try_push_requests(priv, ring, nreq);
+		if (nreq)
+			priv->ring[ring].busy = true;
+	}
+
+	priv->ring[ring].requests_left += nreq;
+
+	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	/* let the RDR know we have pending descriptors */
 	writel((rdesc * priv->config.rd_offset) << 2,
@@ -483,8 +506,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 	/* let the CDR know we have pending descriptors */
 	writel((cdesc * priv->config.cd_offset) << 2,
 	       priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
-
-	spin_unlock_bh(&priv->ring[ring].lock);
 }
 
 void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -579,14 +600,14 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 {
 	struct safexcel_request *sreq;
 	struct safexcel_context *ctx;
-	int ret, i, nreq, ndesc = 0;
+	int ret, i, nreq, ndesc = 0, done;
 	bool should_complete;
 
 	nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
 	nreq >>= 24;
 	nreq &= GENMASK(6, 0);
 	if (!nreq)
-		return;
+		goto requests_left;
 
 	for (i = 0; i < nreq; i++) {
 		spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -601,7 +622,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 		if (ndesc < 0) {
 			kfree(sreq);
 			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
-			return;
+			goto requests_left;
 		}
 
 		writel(EIP197_xDR_PROC_xD_PKT(1) |
@@ -616,6 +637,18 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv

 		kfree(sreq);
 	}
+
+requests_left:
+	spin_lock_bh(&priv->ring[ring].egress_lock);
+
+	done = safexcel_try_push_requests(priv, ring,
+					  priv->ring[ring].requests_left);
+
+	priv->ring[ring].requests_left -= done;
+	if (!done && !priv->ring[ring].requests_left)
+		priv->ring[ring].busy = false;
+
+	spin_unlock_bh(&priv->ring[ring].egress_lock);
 }
 
 static void safexcel_dequeue_work(struct work_struct *work)
@@ -861,6 +894,9 @@ static int safexcel_probe(struct platform_device *pdev)
 			goto err_clk;
 		}
 
+		priv->ring[i].requests_left = 0;
+		priv->ring[i].busy = false;
+
 		crypto_init_queue(&priv->ring[i].queue,
 				  EIP197_DEFAULT_RING_SIZE);
 
drivers/crypto/inside-secure/safexcel.h (8 additions & 0 deletions)

@@ -489,6 +489,14 @@ struct safexcel_crypto_priv {
 		/* queue */
 		struct crypto_queue queue;
 		spinlock_t queue_lock;
+
+		/* Number of requests in the engine that needs the threshold
+		 * interrupt to be set up.
+		 */
+		int requests_left;
+
+		/* The ring is currently handling at least one request */
+		bool busy;
 	} ring[EIP197_MAX_RINGS];
 };
 
