IB/cma: Check for GID on listening device first
As a simple optimization that should speed up the vast majority of
connect attempts on IB devices, when we are searching for the GID of an
incoming connection in the cached GID lists of devices, search the
device that received the incoming connection request first.  If we
don't find it there, then move on to other devices.
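
The shape of the change is easier to see stripped of the RDMA details.
Below is a minimal user-space sketch of the new lookup order; every type
and helper in it (struct dev, gid_on_port(), acquire_dev()) is a made-up
stand-in for illustration, not the kernel API. The real code is in the
diff further down.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dev {				/* stand-in for struct cma_device */
	int id;
	int nports;
};

/* Toy stand-in for ib_find_cached_gid(): does (d, port) own this GID? */
static bool gid_on_port(const struct dev *d, int port, unsigned gid)
{
	return gid == (unsigned)(d->id * 16 + port);
}

/*
 * Find the (device, port) that owns @gid.  @listen_dev/@listen_port
 * describe the device that received the connection request (NULL when
 * there is no listener to prefer, as on the active side).
 */
static const struct dev *acquire_dev(const struct dev *devs, size_t ndevs,
				     unsigned gid,
				     const struct dev *listen_dev,
				     int listen_port, int *out_port)
{
	size_t i;
	int port;

	/* Fast path: the GID is almost always on the listening device. */
	if (listen_dev && gid_on_port(listen_dev, listen_port, gid)) {
		*out_port = listen_port;
		return listen_dev;
	}

	/*
	 * Slow path: scan every port of every device, skipping the
	 * (device, port) pair already checked above.
	 */
	for (i = 0; i < ndevs; i++) {
		for (port = 1; port <= devs[i].nports; port++) {
			if (&devs[i] == listen_dev && port == listen_port)
				continue;
			if (gid_on_port(&devs[i], port, gid)) {
				*out_port = port;
				return &devs[i];
			}
		}
	}
	return NULL;			/* the kernel version returns -ENODEV */
}

int main(void)
{
	struct dev devs[] = { { 0, 2 }, { 1, 2 } };
	int port = -1;
	/* GID 18 lives on device 1, port 2, which is also the listener,
	 * so the fast path hits without scanning device 0. */
	const struct dev *d = acquire_dev(devs, 2, 18, &devs[1], 2, &port);

	printf("found: dev %d port %d\n", d ? d->id : -1, port);
	return 0;
}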

This reduces the time to perform 10,000 connections considerably.
Prior to this patch, a bad run of cmtime (librdmacm's connection
micro-benchmark; the last column works out to microseconds per
connection) would look like this:

connect      :    12399.26   12351.10    8609.00    1239.93

With this patch, it looks more like this:

connect      :     5864.86    5799.80    8876.00     586.49

Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Doug Ledford authored and Roland Dreier committed Nov 8, 2013
1 parent 29f27e8 · commit be9130c
Showing 1 changed file with 30 additions and 6 deletions: drivers/infiniband/core/cma.c
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -349,7 +349,8 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
 	return ret;
 }
 
-static int cma_acquire_dev(struct rdma_id_private *id_priv)
+static int cma_acquire_dev(struct rdma_id_private *id_priv,
+			   struct rdma_id_private *listen_id_priv)
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	struct cma_device *cma_dev;
@@ -367,8 +368,30 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 	iboe_addr_get_sgid(dev_addr, &iboe_gid);
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
+	if (listen_id_priv &&
+	    rdma_port_get_link_layer(listen_id_priv->id.device,
+				     listen_id_priv->id.port_num) == dev_ll) {
+		cma_dev = listen_id_priv->cma_dev;
+		port = listen_id_priv->id.port_num;
+		if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+		    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+			ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
+						 &found_port, NULL);
+		else
+			ret = ib_find_cached_gid(cma_dev->device, &gid,
+						 &found_port, NULL);
+
+		if (!ret && (port == found_port)) {
+			id_priv->id.port_num = found_port;
+			goto out;
+		}
+	}
 	list_for_each_entry(cma_dev, &dev_list, list) {
-		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port)
+		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+			if (listen_id_priv &&
+			    listen_id_priv->cma_dev == cma_dev &&
+			    listen_id_priv->id.port_num == port)
+				continue;
 			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
 				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
 				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
@@ -381,6 +404,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 					goto out;
 				}
 			}
+		}
 	}
 
 out:
@@ -1269,7 +1293,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	}
 
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	ret = cma_acquire_dev(conn_id);
+	ret = cma_acquire_dev(conn_id, listen_id);
 	if (ret)
 		goto err2;
 
@@ -1458,7 +1482,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cma_acquire_dev(conn_id);
+	ret = cma_acquire_dev(conn_id, listen_id);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -2027,7 +2051,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 		goto out;
 
 	if (!status && !id_priv->cma_dev)
-		status = cma_acquire_dev(id_priv);
+		status = cma_acquire_dev(id_priv, NULL);
 
 	if (status) {
 		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
@@ -2524,7 +2548,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		if (ret)
 			goto err1;
 
-		ret = cma_acquire_dev(id_priv);
+		ret = cma_acquire_dev(id_priv, NULL);
 		if (ret)
 			goto err1;
 	}
