
Commit

---
yaml
---
r: 97108
b: refs/heads/master
c: dbcd00e
h: refs/heads/master
v: v3
Tom Tucker committed May 19, 2008
1 parent c8e4540 commit cd4de49
Showing 2 changed files with 23 additions and 19 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0e7f011a19696cc25d68a8d6631fc6c5aa60a54c
+refs/heads/master: dbcd00eba99945acfc433508a58eadc5dcd18cad
40 changes: 22 additions & 18 deletions trunk/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -228,23 +228,8 @@ static void dto_tasklet_func(unsigned long data)
 		list_del_init(&xprt->sc_dto_q);
 		spin_unlock_irqrestore(&dto_lock, flags);
 
-		if (test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags)) {
-			ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
-			rq_cq_reap(xprt);
-			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
-			/*
-			 * If data arrived before established event,
-			 * don't enqueue. This defers RPC I/O until the
-			 * RDMA connection is complete.
-			 */
-			if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
-				svc_xprt_enqueue(&xprt->sc_xprt);
-		}
-
-		if (test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags)) {
-			ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
-			sq_cq_reap(xprt);
-		}
+		rq_cq_reap(xprt);
+		sq_cq_reap(xprt);
 
 		svc_xprt_put(&xprt->sc_xprt);
 		spin_lock_irqsave(&dto_lock, flags);
@@ -297,6 +282,10 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
 	struct ib_wc wc;
 	struct svc_rdma_op_ctxt *ctxt = NULL;
 
+	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
+		return;
+
+	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
 	atomic_inc(&rdma_stat_rq_poll);
 
 	spin_lock_bh(&xprt->sc_rq_dto_lock);
@@ -316,6 +305,15 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
 
 	if (ctxt)
 		atomic_inc(&rdma_stat_rq_prod);
+
+	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+	/*
+	 * If data arrived before established event,
+	 * don't enqueue. This defers RPC I/O until the
+	 * RDMA connection is complete.
+	 */
+	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
+		svc_xprt_enqueue(&xprt->sc_xprt);
 }
 
 /*
@@ -328,6 +326,11 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
 	struct ib_cq *cq = xprt->sc_sq_cq;
 	int ret;
 
+
+	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
+		return;
+
+	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
 	atomic_inc(&rdma_stat_sq_poll);
 	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
 		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
@@ -1010,7 +1013,8 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 		if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
 			spin_unlock_bh(&xprt->sc_lock);
 			atomic_inc(&rdma_stat_sq_starve);
-			/* See if we can reap some SQ WR */
+
+			/* See if we can opportunistically reap SQ WR to make room */
 			sq_cq_reap(xprt);
 
 			/* Wait until SQ WR available if SQ still full */
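The net effect of the hunks above: the RDMAXPRT_RQ_PENDING/RDMAXPRT_SQ_PENDING tests and the ib_req_notify_cq() re-arm move out of dto_tasklet_func() and into rq_cq_reap() and sq_cq_reap() themselves, so the tasklet (and the starvation path in svc_rdma_send()) can call the reap functions unconditionally. The userspace C sketch below only models that control flow under simplified assumptions; the cq_demo struct, cq_demo_reap(), and dispatcher() names are illustrative stand-ins, not code from this commit.

/*
 * Minimal single-threaded model of the pattern introduced above: each reap
 * function owns its own "pending" flag and notification re-arm, so callers
 * can invoke it unconditionally. All names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cq_demo {
	atomic_bool pending;	/* stands in for RDMAXPRT_{RQ,SQ}_PENDING */
	int queued;		/* stands in for unpolled work completions */
	const char *name;
};

/* Bail out if no event was signalled; otherwise re-arm, then drain. */
static void cq_demo_reap(struct cq_demo *cq)
{
	if (!atomic_exchange(&cq->pending, false))
		return;					/* ~ test_and_clear_bit() */
	printf("%s: re-arm notification\n", cq->name);	/* ~ ib_req_notify_cq() */
	while (cq->queued > 0) {			/* ~ ib_poll_cq() loop */
		cq->queued--;
		printf("%s: reaped one completion\n", cq->name);
	}
}

/* Like dto_tasklet_func() after this change: no per-queue branching here. */
static void dispatcher(struct cq_demo *rq, struct cq_demo *sq)
{
	cq_demo_reap(rq);
	cq_demo_reap(sq);
}

int main(void)
{
	struct cq_demo rq = { .queued = 2, .name = "rq" };
	struct cq_demo sq = { .queued = 1, .name = "sq" };

	atomic_init(&rq.pending, true);		/* only the receive side signalled */
	atomic_init(&sq.pending, false);
	dispatcher(&rq, &sq);			/* sq reap returns immediately */
	return 0;
}

Running it prints two "rq: reaped one completion" lines and nothing for sq, mirroring the early return this commit adds to sq_cq_reap() when RDMAXPRT_SQ_PENDING was never set.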
