From 3d90b335c7e667fddec4ca059bc61b4b14e73d7e Mon Sep 17 00:00:00 2001
From: Steve Wise
Date: Fri, 11 Mar 2011 22:30:01 +0000
Subject: [PATCH]

--- yaml ---
r: 235124
b: refs/heads/master
c: b52fe09e3309c3d7069cd0e5a3bdb5b4ba45e01f
h: refs/heads/master
v: v3
---
 [refs]                                 |  2 +-
 trunk/drivers/infiniband/core/cm.c     | 20 +--------
 trunk/drivers/infiniband/core/cma.c    | 58 ++++++++++++--------------
 trunk/drivers/infiniband/hw/cxgb4/cm.c |  4 +-
 4 files changed, 31 insertions(+), 53 deletions(-)

diff --git a/[refs] b/[refs]
index c563d69071e7..56b11d79c991 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a396d43a35fb91f2d4920a4700d25ecc5ec92404
+refs/heads/master: b52fe09e3309c3d7069cd0e5a3bdb5b4ba45e01f
diff --git a/trunk/drivers/infiniband/core/cm.c b/trunk/drivers/infiniband/core/cm.c
index f804e28e1ebb..64e0903091a8 100644
--- a/trunk/drivers/infiniband/core/cm.c
+++ b/trunk/drivers/infiniband/core/cm.c
@@ -1988,10 +1988,6 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	if (cm_id->lap_state == IB_CM_LAP_SENT ||
-	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
-		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
-
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret) {
 		cm_enter_timewait(cm_id_priv);
@@ -2133,10 +2129,6 @@ static int cm_dreq_handler(struct cm_work *work)
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 		break;
 	case IB_CM_ESTABLISHED:
-		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
-		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
-			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
-		break;
 	case IB_CM_MRA_REP_RCVD:
 		break;
 	case IB_CM_TIMEWAIT:
@@ -2357,18 +2349,9 @@ static int cm_rej_handler(struct cm_work *work)
 		/* fall through */
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
+	case IB_CM_ESTABLISHED:
 		cm_enter_timewait(cm_id_priv);
 		break;
-	case IB_CM_ESTABLISHED:
-		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
-		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
-			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
-				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
-					      cm_id_priv->msg);
-			cm_enter_timewait(cm_id_priv);
-			break;
-		}
-		/* fall through */
 	default:
 		spin_unlock_irq(&cm_id_priv->lock);
 		ret = -EINVAL;
@@ -3006,7 +2989,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
 		goto out; /* No match. */
 	}
 	atomic_inc(&cur_cm_id_priv->refcount);
-	atomic_inc(&cm_id_priv->refcount);
 	spin_unlock_irq(&cm.lock);
 
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
diff --git a/trunk/drivers/infiniband/core/cma.c b/trunk/drivers/infiniband/core/cma.c
index 5ed9d25d021a..6884da24fde1 100644
--- a/trunk/drivers/infiniband/core/cma.c
+++ b/trunk/drivers/infiniband/core/cma.c
@@ -308,13 +308,11 @@ static inline void release_mc(struct kref *kref)
 	kfree(mc);
 }
 
-static void cma_release_dev(struct rdma_id_private *id_priv)
+static void cma_detach_from_dev(struct rdma_id_private *id_priv)
 {
-	mutex_lock(&lock);
 	list_del(&id_priv->list);
 	cma_deref_dev(id_priv->cma_dev);
 	id_priv->cma_dev = NULL;
-	mutex_unlock(&lock);
 }
 
 static int cma_set_qkey(struct rdma_id_private *id_priv)
@@ -375,7 +373,6 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
-	mutex_lock(&lock);
 	iboe_addr_get_sgid(dev_addr, &iboe_gid);
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
@@ -401,7 +398,6 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 	if (!ret)
 		cma_attach_to_dev(id_priv, cma_dev);
 
-	mutex_unlock(&lock);
 	return ret;
 }
 
@@ -908,14 +904,9 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	state = cma_exch(id_priv, CMA_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
-	/*
-	 * Wait for any active callback to finish.  New callbacks will find
-	 * the id_priv state set to destroying and abort.
-	 */
-	mutex_lock(&id_priv->handler_mutex);
-	mutex_unlock(&id_priv->handler_mutex);
-
+	mutex_lock(&lock);
 	if (id_priv->cma_dev) {
+		mutex_unlock(&lock);
 		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 		case RDMA_TRANSPORT_IB:
 			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
@@ -929,8 +920,10 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 			break;
 		}
 		cma_leave_mc_groups(id_priv);
-		cma_release_dev(id_priv);
+		mutex_lock(&lock);
+		cma_detach_from_dev(id_priv);
 	}
+	mutex_unlock(&lock);
 
 	cma_release_port(id_priv);
 	cma_deref_id(id_priv);
@@ -1207,7 +1200,9 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	}
 
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
+	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
+	mutex_unlock(&lock);
 	if (ret)
 		goto release_conn_id;
 
@@ -1215,11 +1210,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	cm_id->context = conn_id;
 	cm_id->cm_handler = cma_ib_handler;
 
-	/*
-	 * Protect against the user destroying conn_id from another thread
-	 * until we're done accessing it.
-	 */
-	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (!ret) {
 		/*
@@ -1232,10 +1222,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		mutex_unlock(&lock);
 		mutex_unlock(&conn_id->handler_mutex);
-		cma_deref_id(conn_id);
 		goto out;
 	}
-	cma_deref_id(conn_id);
 
 	/* Destroy the CM ID by returning a non-zero value. */
 	conn_id->cm_id.ib = NULL;
@@ -1406,7 +1394,9 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		goto out;
 	}
 
+	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
+	mutex_unlock(&lock);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1435,25 +1425,17 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	event.param.conn.private_data_len = iw_event->private_data_len;
 	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
 	event.param.conn.responder_resources = attr.max_qp_rd_atom;
-
-	/*
-	 * Protect against the user destroying conn_id from another thread
-	 * until we're done accessing it.
-	 */
-	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
 		cma_exch(conn_id, CMA_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
-		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
 		goto out;
 	}
 
 	mutex_unlock(&conn_id->handler_mutex);
-	cma_deref_id(conn_id);
 
 out:
 	if (dev)
@@ -1969,11 +1951,20 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+
+	/*
+	 * Grab mutex to block rdma_destroy_id() from removing the device while
+	 * we're trying to acquire it.
+	 */
+	mutex_lock(&lock);
+	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
+		mutex_unlock(&lock);
 		goto out;
+	}
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
+	mutex_unlock(&lock);
 
 	if (status) {
 		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
@@ -2274,7 +2265,9 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		if (ret)
 			goto err1;
 
+		mutex_lock(&lock);
 		ret = cma_acquire_dev(id_priv);
+		mutex_unlock(&lock);
 		if (ret)
 			goto err1;
 	}
@@ -2286,8 +2279,11 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
 	return 0;
 err2:
-	if (id_priv->cma_dev)
-		cma_release_dev(id_priv);
+	if (id_priv->cma_dev) {
+		mutex_lock(&lock);
+		cma_detach_from_dev(id_priv);
+		mutex_unlock(&lock);
+	}
 err1:
 	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
 	return ret;
diff --git a/trunk/drivers/infiniband/hw/cxgb4/cm.c b/trunk/drivers/infiniband/hw/cxgb4/cm.c
index 8b00e6c46f01..65d3fe6cfd5c 100644
--- a/trunk/drivers/infiniband/hw/cxgb4/cm.c
+++ b/trunk/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,9 +61,9 @@ static char *states[] = {
 	NULL,
 };
 
-static int dack_mode;
+static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
 int c4iw_max_read_depth = 8;
 module_param(c4iw_max_read_depth, int, 0644);