Merge branch 'cxgb4-next'
Hariprasad Shenai says:

====================
Misc. fixes for iw_cxgb4

This patch series adds support for determining the ingress padding boundary at
runtime, advertises a larger max read queue depth for QPs, gathers the resource
limits from firmware and uses them to avoid exhausting the resources, displays
the TPTE on errors, and adds support for the work request logging feature.

The patch series is created against the 'net-next' tree and includes patches
for the cxgb4 and iw_cxgb4 drivers.

Since this patch series contains changes that depend on commit fc5ab02
("cxgb4: Replaced the backdoor mechanism to access the HW memory with
PCIe Window method"), we would like to request that it be merged via David
Miller's 'net-next' tree.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if there are any review comments.

V2:
 Optimized the alloc_ird function and made several other changes related to
 debug prints, based on review comments from Yann Droneaud.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Jul 15, 2014
2 parents 5ee2c94 + 7730b4c commit 7cf6604
Showing 13 changed files with 528 additions and 61 deletions.
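For reference in the hunks that follow: the series replaces the compile-time c4iw_max_read_depth cap with a cur_max_read_depth() helper. That helper is defined in iw_cxgb4.h, which is not among the hunks shown below; the following is a minimal sketch, assuming it clamps the module parameter to the per-QP ORD/IRD limit the firmware advertises through the LLD info (max_ordird_qp):

/*
 * Hypothetical sketch of cur_max_read_depth(); the real definition
 * lives in iw_cxgb4.h and is not part of the hunks shown here. This
 * assumes the helper caps the c4iw_max_read_depth module parameter at
 * the firmware-advertised per-QP ORD/IRD limit.
 */
static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
	return min_t(u32, dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
}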
80 changes: 57 additions & 23 deletions drivers/infiniband/hw/cxgb4/cm.c
@@ -79,9 +79,10 @@ static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
-int c4iw_max_read_depth = 8;
+uint c4iw_max_read_depth = 32;
 module_param(c4iw_max_read_depth, int, 0644);
-MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+MODULE_PARM_DESC(c4iw_max_read_depth,
+		 "Per-connection max ORD/IRD (default=32)");
 
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
@@ -813,6 +814,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	if (mpa_rev_to_use == 2) {
 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
 					       sizeof (struct mpa_v2_conn_params));
+		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+		     ep->ord);
 		mpa_v2_params.ird = htons((u16)ep->ird);
 		mpa_v2_params.ord = htons((u16)ep->ord);
 
@@ -1182,8 +1185,8 @@ static int connect_request_upcall(struct c4iw_ep *ep)
 			sizeof(struct mpa_v2_conn_params);
 	} else {
 		/* this means MPA_v1 is used. Send max supported */
-		event.ord = c4iw_max_read_depth;
-		event.ird = c4iw_max_read_depth;
+		event.ord = cur_max_read_depth(ep->com.dev);
+		event.ird = cur_max_read_depth(ep->com.dev);
 		event.private_data_len = ep->plen;
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
@@ -1247,6 +1250,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	return credits;
 }
 
+#define RELAXED_IRD_NEGOTIATION 1
+
 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
@@ -1358,17 +1363,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 					MPA_V2_IRD_ORD_MASK;
 				resp_ord = ntohs(mpa_v2_params->ord) &
 					MPA_V2_IRD_ORD_MASK;
+				PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
+				     __func__, resp_ird, resp_ord, ep->ird, ep->ord);
 
 				/*
 				 * This is a double-check. Ideally, below checks are
 				 * not required since ird/ord stuff has been taken
 				 * care of in c4iw_accept_cr
 				 */
-				if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+				if (ep->ird < resp_ord) {
+					if (RELAXED_IRD_NEGOTIATION && resp_ord <=
+					    ep->com.dev->rdev.lldi.max_ordird_qp)
+						ep->ird = resp_ord;
+					else
+						insuff_ird = 1;
+				} else if (ep->ird > resp_ord) {
+					ep->ird = resp_ord;
+				}
+				if (ep->ord > resp_ird) {
+					if (RELAXED_IRD_NEGOTIATION)
+						ep->ord = resp_ird;
+					else
+						insuff_ird = 1;
+				}
+				if (insuff_ird) {
 					err = -ENOMEM;
 					ep->ird = resp_ord;
 					ep->ord = resp_ird;
-					insuff_ird = 1;
 				}
 
 				if (ntohs(mpa_v2_params->ird) &
@@ -1571,6 +1592,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 				MPA_V2_IRD_ORD_MASK;
 			ep->ord = ntohs(mpa_v2_params->ord) &
 				MPA_V2_IRD_ORD_MASK;
+			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+			     ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
 				if (peer2peer) {
 					if (ntohs(mpa_v2_params->ord) &
@@ -2724,40 +2747,50 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	BUG_ON(!qp);
 
 	set_bit(ULP_ACCEPT, &ep->com.history);
-	if ((conn_param->ord > c4iw_max_read_depth) ||
-	    (conn_param->ird > c4iw_max_read_depth)) {
+	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
+	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
 		abort_connection(ep, NULL, GFP_KERNEL);
 		err = -EINVAL;
 		goto err;
 	}
 
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		if (conn_param->ord > ep->ird) {
-			ep->ird = conn_param->ird;
-			ep->ord = conn_param->ord;
-			send_mpa_reject(ep, conn_param->private_data,
-					conn_param->private_data_len);
-			abort_connection(ep, NULL, GFP_KERNEL);
-			err = -ENOMEM;
-			goto err;
+			if (RELAXED_IRD_NEGOTIATION) {
+				ep->ord = ep->ird;
+			} else {
+				ep->ird = conn_param->ird;
+				ep->ord = conn_param->ord;
+				send_mpa_reject(ep, conn_param->private_data,
+						conn_param->private_data_len);
+				abort_connection(ep, NULL, GFP_KERNEL);
+				err = -ENOMEM;
+				goto err;
+			}
 		}
-		if (conn_param->ird > ep->ord) {
-			if (!ep->ord)
-				conn_param->ird = 1;
-			else {
+		if (conn_param->ird < ep->ord) {
+			if (RELAXED_IRD_NEGOTIATION &&
+			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
+				conn_param->ird = ep->ord;
+			} else {
 				abort_connection(ep, NULL, GFP_KERNEL);
 				err = -ENOMEM;
 				goto err;
 			}
 		}
-
 	}
 	ep->ird = conn_param->ird;
 	ep->ord = conn_param->ord;
 
-	if (ep->mpa_attr.version != 2)
+	if (ep->mpa_attr.version == 1) {
 		if (peer2peer && ep->ird == 0)
 			ep->ird = 1;
+	} else {
+		if (peer2peer &&
+		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
+		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
+			ep->ird = 1;
+	}
 
 	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
@@ -2796,6 +2829,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	return 0;
 err1:
 	ep->com.cm_id = NULL;
+	abort_connection(ep, NULL, GFP_KERNEL);
 	cm_id->rem_ref(cm_id);
 err:
 	mutex_unlock(&ep->com.mutex);
@@ -2879,8 +2913,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	int iptype;
 	int iwpm_err = 0;
 
-	if ((conn_param->ord > c4iw_max_read_depth) ||
-	    (conn_param->ird > c4iw_max_read_depth)) {
+	if ((conn_param->ord > cur_max_read_depth(dev)) ||
+	    (conn_param->ird > cur_max_read_depth(dev))) {
 		err = -EINVAL;
 		goto out;
 	}
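
To make the RELAXED_IRD_NEGOTIATION behavior above concrete, here is a standalone userspace model of the responder-side resize in process_mpa_reply(). The function and parameter names (negotiate_ird_ord, fw_max) are illustrative, not the driver's API; fw_max stands in for ep->com.dev->rdev.lldi.max_ordird_qp. In relaxed mode a mismatch shrinks (or, within the firmware limit, grows) the local values instead of terminating the connection:

#include <stdio.h>

#define RELAXED_IRD_NEGOTIATION 1

/*
 * Illustrative model, not driver code: *ird/*ord are the local
 * endpoint's values, resp_ird/resp_ord arrive in the peer's MPA v2
 * reply, and fw_max stands in for the firmware's max_ordird_qp limit.
 * Returns 0 on success, -1 where the driver would fail with -ENOMEM.
 */
static int negotiate_ird_ord(unsigned int *ird, unsigned int *ord,
			     unsigned int resp_ird, unsigned int resp_ord,
			     unsigned int fw_max)
{
	int insuff_ird = 0;

	if (*ird < resp_ord) {
		if (RELAXED_IRD_NEGOTIATION && resp_ord <= fw_max)
			*ird = resp_ord;	/* grow IRD to cover peer's ORD */
		else
			insuff_ird = 1;
	} else if (*ird > resp_ord) {
		*ird = resp_ord;		/* shrink IRD to peer's ORD */
	}
	if (*ord > resp_ird) {
		if (RELAXED_IRD_NEGOTIATION)
			*ord = resp_ird;	/* shrink ORD to peer's IRD */
		else
			insuff_ird = 1;
	}
	if (insuff_ird) {
		*ird = resp_ord;
		*ord = resp_ird;
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int ird = 8, ord = 32;

	/* Peer replies with ird=16, ord=8: relaxed mode trims ORD to 16. */
	int rc = negotiate_ird_ord(&ird, &ord, 16, 8, 32);

	printf("rc=%d ird=%u ord=%u\n", rc, ird, ord);	/* rc=0 ird=8 ord=16 */
	return 0;
}

Running it prints rc=0 with ird=8, ord=16: the local ORD is quietly reduced to the peer's IRD, where the strict path would have rejected the connection with -ENOMEM.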
8 changes: 6 additions & 2 deletions drivers/infiniband/hw/cxgb4/cq.c
@@ -633,11 +633,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		wq->sq.cidx = (uint16_t)idx;
 		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
 		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+		if (c4iw_wr_log)
+			c4iw_log_wr_stats(wq, hw_cqe);
 		t4_sq_consume(wq);
 	} else {
 		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
 		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
 		BUG_ON(t4_rq_empty(wq));
+		if (c4iw_wr_log)
+			c4iw_log_wr_stats(wq, hw_cqe);
 		t4_rq_consume(wq);
 		goto skip_cqe;
 	}
@@ -895,7 +899,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	/*
 	 * Make actual HW queue 2x to avoid cdix_inc overflows.
 	 */
-	hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
+	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
 
 	/*
 	 * Make HW queue at least 64 entries so GTS updates aren't too
@@ -912,7 +916,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	if (ucontext) {
 		memsize = roundup(memsize, PAGE_SIZE);
 		hwentries = memsize / sizeof *chp->cq.queue;
-		while (hwentries > T4_MAX_IQ_SIZE) {
+		while (hwentries > rhp->rdev.hw_queue.t4_max_iq_size) {
 			memsize -= PAGE_SIZE;
 			hwentries = memsize / sizeof *chp->cq.queue;
 		}
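
The new c4iw_wr_log checks in poll_cq() above hook into the work request logging feature added elsewhere in this series; c4iw_log_wr_stats() itself is not among the hunks shown. A speculative sketch of the general shape such a logger could take (all names, fields, and sizes here are assumptions, not the driver's API):

#include <stdint.h>

/*
 * Speculative sketch only: the real c4iw_log_wr_stats(wq, hw_cqe) is
 * not shown in this diff. The idea illustrated: keep a fixed-size ring
 * of timestamped completion records, overwriting the oldest, so a
 * debug tool can later correlate hardware CQE timestamps with host
 * time when diagnosing latency.
 */
#define WR_LOG_SIZE 4096	/* assumed capacity */

struct wr_log_entry {
	uint64_t host_ts;	/* host clock when the CQE was polled */
	uint64_t cqe_ts;	/* timestamp carried in the hardware CQE */
	uint32_t qid;		/* queue the completion belongs to */
};

static struct wr_log_entry wr_log[WR_LOG_SIZE];
static unsigned int wr_log_idx;

static void log_wr_stats(uint32_t qid, uint64_t host_ts, uint64_t cqe_ts)
{
	struct wr_log_entry *e = &wr_log[wr_log_idx++ % WR_LOG_SIZE];

	e->host_ts = host_ts;
	e->cqe_ts = cqe_ts;
	e->qid = qid;
}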
