Skip to content

Commit

Permalink
xprtrdma: Add "reset MRs" memreg op
Browse files Browse the repository at this point in the history
This method is invoked when a transport instance is about to be
reconnected. Each Memory Region object is reset to its initial
state.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Devesh Sharma <Devesh.Sharma@Emulex.Com>
Tested-by: Meghana Cheripady <Meghana.Cheripady@Emulex.Com>
Tested-by: Veeresh U. Kokatnur <veereshuk@chelsio.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
  • Loading branch information
Chuck Lever authored and Anna Schumaker committed Mar 31, 2015
1 parent 91e70e7 commit 31a701a
Show file tree
Hide file tree
Showing 5 changed files with 83 additions and 101 deletions.
23 changes: 23 additions & 0 deletions net/sunrpc/xprtrdma/fmr_ops.c
Original file line number Diff line number Diff line change
Expand Up @@ -146,10 +146,33 @@ fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
return nsegs;
}

/* After a disconnect, unmap all FMRs.
*
* This is invoked only in the transport connect worker in order
* to serialize with rpcrdma_register_fmr_external().
*/
static void
fmr_op_reset(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_mw *r;
LIST_HEAD(list);
int rc;

list_for_each_entry(r, &buf->rb_all, mw_all)
list_add(&r->r.fmr->list, &list);

rc = ib_unmap_fmr(&list);
if (rc)
dprintk("RPC: %s: ib_unmap_fmr failed %i\n",
__func__, rc);
}

/* Memory registration method vector for the FMR strategy. */
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
.ro_map = fmr_op_map,
.ro_unmap = fmr_op_unmap,
.ro_maxpages = fmr_op_maxpages,
.ro_init = fmr_op_init,
.ro_reset = fmr_op_reset,
.ro_displayname = "fmr",
};
51 changes: 51 additions & 0 deletions net/sunrpc/xprtrdma/frwr_ops.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,18 @@ __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
return rc;
}

/* __frwr_release - tear down the fast-reg MR and page list owned
 * by one rpcrdma_mw. A dereg failure is logged but not fatal; the
 * page list is freed regardless.
 */
static void
__frwr_release(struct rpcrdma_mw *r)
{
	int status = ib_dereg_mr(r->r.frmr.fr_mr);

	if (status)
		dprintk("RPC: %s: ib_dereg_mr status %i\n",
			__func__, status);
	ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
}

/* FRWR mode conveys a list of pages per chunk segment. The
* maximum length of that list is the FRWR page list depth.
*/
Expand Down Expand Up @@ -210,10 +222,49 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
return nsegs;
}

/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
* an unusable state. Find FRMRs in this state and dereg / reg
* each. FRMRs that are VALID and attached to an rpcrdma_req are
* also torn down.
*
* This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
*
* This is invoked only in the transport connect worker in order
* to serialize with rpcrdma_register_frmr_external().
*/
static void
frwr_op_reset(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct ib_device *device = r_xprt->rx_ia.ri_id->device;
unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
struct rpcrdma_mw *r;
int rc;

/* Walk every MR the transport owns. rb_all is presumably stable
 * here because only the connect worker runs this path — see the
 * serialization note above this function. */
list_for_each_entry(r, &buf->rb_all, mw_all) {
/* An INVALID FRMR is already in its initial state; skip it. */
if (r->r.frmr.fr_state == FRMR_IS_INVALID)
continue;

/* Destroy and recreate the MR so it gets a fresh rkey. */
__frwr_release(r);
rc = __frwr_init(r, pd, device, depth);
if (rc) {
/* Re-init failed: the MR keeps its current state (STALE or
 * VALID) and is left out of service. */
dprintk("RPC: %s: mw %p left %s\n",
__func__, r,
(r->r.frmr.fr_state == FRMR_IS_STALE ?
"stale" : "valid"));
continue;
}

r->r.frmr.fr_state = FRMR_IS_INVALID;
}
}

/* Memory registration method vector for the FRWR strategy. */
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_map = frwr_op_map,
.ro_unmap = frwr_op_unmap,
.ro_maxpages = frwr_op_maxpages,
.ro_init = frwr_op_init,
.ro_reset = frwr_op_reset,
.ro_displayname = "frwr",
};
6 changes: 6 additions & 0 deletions net/sunrpc/xprtrdma/physical_ops.c
Original file line number Diff line number Diff line change
Expand Up @@ -59,10 +59,16 @@ physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
return 1;
}

/* Physical registration uses the pre-registered all-physical memory
 * region, so there is no per-MR state to reset after a disconnect.
 * This is deliberately a no-op to satisfy the ro_reset method.
 */
static void
physical_op_reset(struct rpcrdma_xprt *r_xprt)
{
}

/* Memory registration method vector for the all-physical strategy. */
const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
.ro_map = physical_op_map,
.ro_unmap = physical_op_unmap,
.ro_maxpages = physical_op_maxpages,
.ro_init = physical_op_init,
.ro_reset = physical_op_reset,
.ro_displayname = "physical",
};
103 changes: 2 additions & 101 deletions net/sunrpc/xprtrdma/verbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -63,9 +63,6 @@
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

static void rpcrdma_reset_frmrs(struct rpcrdma_ia *);
static void rpcrdma_reset_fmrs(struct rpcrdma_ia *);

/*
* internal functions
*/
Expand Down Expand Up @@ -945,21 +942,9 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
rpcrdma_ep_disconnect(ep, ia);
rpcrdma_flush_cqs(ep);

switch (ia->ri_memreg_strategy) {
case RPCRDMA_FRMR:
rpcrdma_reset_frmrs(ia);
break;
case RPCRDMA_MTHCAFMR:
rpcrdma_reset_fmrs(ia);
break;
case RPCRDMA_ALLPHYSICAL:
break;
default:
rc = -EIO;
goto out;
}

xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
ia->ri_ops->ro_reset(xprt);

id = rpcrdma_create_id(xprt, ia,
(struct sockaddr *)&xprt->rx_data.addr);
if (IS_ERR(id)) {
Expand Down Expand Up @@ -1289,90 +1274,6 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
kfree(buf->rb_pool);
}

/* After a disconnect, unmap all FMRs.
*
* This is invoked only in the transport connect worker in order
* to serialize with rpcrdma_register_fmr_external().
*/
static void
rpcrdma_reset_fmrs(struct rpcrdma_ia *ia)
{
/* Recover the owning transport from the embedded ia. */
struct rpcrdma_xprt *r_xprt =
container_of(ia, struct rpcrdma_xprt, rx_ia);
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct list_head *pos;
struct rpcrdma_mw *r;
LIST_HEAD(l);
int rc;

/* Unmap each FMR individually: the singleton list "l" is
 * re-initialized for every MR, so each ib_unmap_fmr() call
 * covers exactly one FMR. (Contrast with fmr_op_reset, which
 * batches all FMRs into a single call.) */
list_for_each(pos, &buf->rb_all) {
r = list_entry(pos, struct rpcrdma_mw, mw_all);

INIT_LIST_HEAD(&l);
list_add(&r->r.fmr->list, &l);
rc = ib_unmap_fmr(&l);
if (rc)
dprintk("RPC: %s: ib_unmap_fmr failed %i\n",
__func__, rc);
}
}

/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
* an unusable state. Find FRMRs in this state and dereg / reg
* each. FRMRs that are VALID and attached to an rpcrdma_req are
* also torn down.
*
* This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
*
* This is invoked only in the transport connect worker in order
* to serialize with rpcrdma_register_frmr_external().
*/
static void
rpcrdma_reset_frmrs(struct rpcrdma_ia *ia)
{
/* Recover the owning transport from the embedded ia. */
struct rpcrdma_xprt *r_xprt =
container_of(ia, struct rpcrdma_xprt, rx_ia);
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct list_head *pos;
struct rpcrdma_mw *r;
int rc;

list_for_each(pos, &buf->rb_all) {
r = list_entry(pos, struct rpcrdma_mw, mw_all);

/* An INVALID FRMR is already in its initial state; skip it. */
if (r->r.frmr.fr_state == FRMR_IS_INVALID)
continue;

/* Tear down the MR and its page list ... */
rc = ib_dereg_mr(r->r.frmr.fr_mr);
if (rc)
dprintk("RPC: %s: ib_dereg_mr failed %i\n",
__func__, rc);
ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);

/* ... then recreate both so the MR gets a fresh rkey. */
r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
ia->ri_max_frmr_depth);
if (IS_ERR(r->r.frmr.fr_mr)) {
rc = PTR_ERR(r->r.frmr.fr_mr);
dprintk("RPC: %s: ib_alloc_fast_reg_mr"
" failed %i\n", __func__, rc);
continue;
}
r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
ia->ri_id->device,
ia->ri_max_frmr_depth);
if (IS_ERR(r->r.frmr.fr_pgl)) {
rc = PTR_ERR(r->r.frmr.fr_pgl);
dprintk("RPC: %s: "
"ib_alloc_fast_reg_page_list "
"failed %i\n", __func__, rc);

/* Page-list allocation failed: release the MR we just
 * allocated so it is not leaked. fr_state is not updated,
 * so this MR stays out of service. */
ib_dereg_mr(r->r.frmr.fr_mr);
continue;
}
r->r.frmr.fr_state = FRMR_IS_INVALID;
}
}

/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
* some req segments uninitialized.
*/
Expand Down
1 change: 1 addition & 0 deletions net/sunrpc/xprtrdma/xprt_rdma.h
Original file line number Diff line number Diff line change
Expand Up @@ -342,6 +342,7 @@ struct rpcrdma_memreg_ops {
struct rpcrdma_mr_seg *);
size_t (*ro_maxpages)(struct rpcrdma_xprt *);
int (*ro_init)(struct rpcrdma_xprt *);
void (*ro_reset)(struct rpcrdma_xprt *);
const char *ro_displayname;
};

Expand Down

0 comments on commit 31a701a

Please sign in to comment.