Merge branches 'cxgb4', 'ipath', 'ipoib', 'mlx4', 'mthca', 'nes', 'qib' and 'srp' into for-next
Roland Dreier committed Jan 11, 2011
8 parents db8b101 + 1eba27e + 8ae31e5 + 1397490 + d0444f1 + 601d87b + 4db62d4 + 9af7627 commit 2b76c05
Showing 32 changed files with 887 additions and 531 deletions.
5 changes: 2 additions & 3 deletions drivers/infiniband/hw/ipath/ipath_driver.c
@@ -530,9 +530,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
for (j = 0; j < 6; j++) {
if (!pdev->resource[j].start)
continue;
ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
j, (unsigned long long)pdev->resource[j].start,
(unsigned long long)pdev->resource[j].end,
ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
j, &pdev->resource[j],
(unsigned long long)pci_resource_len(pdev, j));
}

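The ipath hunk above replaces hand-formatted start/end values with the kernel's %pR printk extension, which prints a whole struct resource in one specifier. A kernel-context sketch of the same idiom (dump_bar is an invented helper name, not part of the patch):

#include <linux/pci.h>
#include <linux/printk.h>

/* Illustrative helper, not from the patch: %pR formats the whole
 * struct resource (start, end, flags), so only the length still
 * needs an explicit cast. */
static void dump_bar(struct pci_dev *pdev, int bar)
{
        pr_debug("BAR %d %pR, len %llx\n", bar, &pdev->resource[bar],
                 (unsigned long long)pci_resource_len(pdev, bar));
}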
9 changes: 8 additions & 1 deletion drivers/infiniband/hw/mlx4/cq.c
@@ -397,17 +397,24 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
cq->resize_buf = NULL;
cq->resize_umem = NULL;
} else {
struct mlx4_ib_cq_buf tmp_buf;
int tmp_cqe = 0;

spin_lock_irq(&cq->lock);
if (cq->resize_buf) {
mlx4_ib_cq_resize_copy_cqes(cq);
mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
tmp_buf = cq->buf;
tmp_cqe = cq->ibcq.cqe;
cq->buf = cq->resize_buf->buf;
cq->ibcq.cqe = cq->resize_buf->cqe;

kfree(cq->resize_buf);
cq->resize_buf = NULL;
}
spin_unlock_irq(&cq->lock);

if (tmp_cqe)
mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
}

goto out;
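The mlx4 resize fix above moves mlx4_ib_free_cq_buf() out of the spin_lock_irq() section: the old buffer is remembered in tmp_buf/tmp_cqe under the lock and freed only after unlocking. A minimal user-space analogue of that defer-the-free pattern, using pthreads and invented names (struct toy_cq, cq_replace_buf):

#include <pthread.h>
#include <stdlib.h>

struct toy_cq {
        pthread_mutex_t lock;
        void *buf;
};

/* Swap in a resized buffer; free the old one only after dropping the
 * lock, so no potentially slow teardown runs in the critical section. */
static void cq_replace_buf(struct toy_cq *cq, void *new_buf)
{
        void *old_buf;

        pthread_mutex_lock(&cq->lock);
        old_buf = cq->buf;              /* remember what to free */
        cq->buf = new_buf;
        pthread_mutex_unlock(&cq->lock);

        free(old_buf);                  /* safe: lock no longer held */
}

int main(void)
{
        struct toy_cq cq = { .lock = PTHREAD_MUTEX_INITIALIZER, .buf = malloc(64) };

        cq_replace_buf(&cq, malloc(128));       /* frees the 64-byte buffer */
        free(cq.buf);
        return 0;
}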
2 changes: 2 additions & 0 deletions drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC);
if (IS_ERR(send_buf))
return;
/*
* We rely here on the fact that MLX QPs don't use the
* address handle after the send is posted (this is
2 changes: 2 additions & 0 deletions drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev,
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC);
if (IS_ERR(send_buf))
return;
/*
* We rely here on the fact that MLX QPs don't use the
* address handle after the send is posted (this is
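The mlx4 and mthca MAD fixes above add the same guard: ib_create_send_mad() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so the result has to be screened with IS_ERR() before it is used. A kernel-context sketch of that check (send_trap_sketch is an invented name):

#include <linux/err.h>
#include <rdma/ib_mad.h>

/* Kernel-context sketch only: shows the shape of the check both
 * patches add, not the drivers' actual trap-forwarding code. */
static void send_trap_sketch(struct ib_mad_agent *agent, u32 qpn)
{
        struct ib_mad_send_buf *send_buf;

        send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
                                      IB_MGMT_MAD_DATA, GFP_ATOMIC);
        if (IS_ERR(send_buf))
                return;                 /* allocation failed: drop the trap */

        /* ... build the MAD, post it, and release it when done ... */
        ib_free_send_mad(send_buf);
}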
4 changes: 2 additions & 2 deletions drivers/infiniband/hw/nes/nes_nic.c
@@ -908,8 +908,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
nesvnic->nic_index &&
mc_index < max_pft_entries_avaiable) {
nes_debug(NES_DBG_NIC_RX,
"mc_index=%d skipping nic_index=%d,\
used for=%d \n", mc_index,
"mc_index=%d skipping nic_index=%d, "
"used for=%d \n", mc_index,
nesvnic->nic_index,
nesadapter->pft_mcast_map[mc_index]);
mc_index++;
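The nes hunk above replaces a backslash line continuation inside a format string, which silently embeds the next line's leading whitespace in the output, with adjacent string literals that the compiler concatenates. A small standalone illustration:

#include <stdio.h>

int main(void)
{
        /* The continued string carries the next line's indentation
         * straight into the output. */
        printf("old style: mc_index=%d,\
                used for=%d\n", 1, 2);

        /* Adjacent literals concatenate with no stray whitespace. */
        printf("new style: mc_index=%d, "
               "used for=%d\n", 1, 2);
        return 0;
}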
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@ struct qib_devdata {
void (*f_sdma_hw_start_up)(struct qib_pportdata *);
void (*f_sdma_init_early)(struct qib_pportdata *);
void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
u32 (*f_hdrqempty)(struct qib_ctxtdata *);
u64 (*f_portcntr)(struct qib_pportdata *, u32);
u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
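The qib.h hunk above gives the f_update_usrhead hook a fifth argument (a packet count); the chip-specific implementations later in this commit change in lockstep. A toy user-space sketch of extending such a per-chip ops hook, with invented names throughout:

#include <stdio.h>

struct toy_dev;

struct toy_ops {
        /* the hook now carries npkts, mirroring f_update_usrhead */
        void (*update_usrhead)(struct toy_dev *dd, unsigned long long hd,
                               unsigned updegr, unsigned egrhd, unsigned npkts);
};

struct toy_dev {
        const struct toy_ops *ops;
        const char *name;
};

static void chip_a_update_usrhead(struct toy_dev *dd, unsigned long long hd,
                                  unsigned updegr, unsigned egrhd,
                                  unsigned npkts)
{
        /* a real chip driver would program head registers here */
        printf("%s: hd=%llu updegr=%u egrhd=%u npkts=%u\n",
               dd->name, hd, updegr, egrhd, npkts);
}

static const struct toy_ops chip_a_ops = {
        .update_usrhead = chip_a_update_usrhead,
};

int main(void)
{
        struct toy_dev dd = { .ops = &chip_a_ops, .name = "chip_a" };

        dd.ops->update_usrhead(&dd, 128, 1, 64, 16);
        return 0;
}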
3 changes: 2 additions & 1 deletion drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
wc->head = next;

if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
(cq->notify == IB_CQ_SOLICITED &&
(solicited || entry->status != IB_WC_SUCCESS))) {
cq->notify = IB_CQ_NONE;
cq->triggered++;
/*
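The qib_cq.c change above widens the wakeup rule: a CQ armed with IB_CQ_SOLICITED now also signals when a completion finishes in error, not only for solicited work. A standalone restatement of the predicate, with invented names (should_signal, NOTIFY_*):

#include <stdio.h>

enum cq_notify { NOTIFY_NONE, NOTIFY_SOLICITED, NOTIFY_NEXT_COMP };

/* Mirrors the condition above: next-completion arming always signals;
 * solicited-only arming signals on solicited entries or on errors. */
static int should_signal(enum cq_notify armed, int solicited, int success)
{
        return armed == NOTIFY_NEXT_COMP ||
               (armed == NOTIFY_SOLICITED && (solicited || !success));
}

int main(void)
{
        printf("solicited-armed, unsolicited, ok : %d\n",
               should_signal(NOTIFY_SOLICITED, 0, 1));  /* 0: stays quiet */
        printf("solicited-armed, unsolicited, err: %d\n",
               should_signal(NOTIFY_SOLICITED, 0, 0));  /* 1: now signals */
        return 0;
}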
155 changes: 148 additions & 7 deletions drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
*/
#define QIB_PIO_MAXIBHDR 128

/*
* QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
*/
#define QIB_MAX_PKT_RECV 64

struct qlogic_ib_stats qib_stats;

const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
* Returns 1 if error was a CRC, else 0.
* Needed for some chip's synthesized error counters.
*/
static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
struct qib_message_header *hdr)
static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
u32 ctxt, u32 eflags, u32 l, u32 etail,
__le32 *rhf_addr, struct qib_message_header *rhdr)
{
u32 ret = 0;

if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
ret = 1;
else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
/* For TIDERR and RC QPs preemptively schedule a NAK */
struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
struct qib_other_headers *ohdr = NULL;
struct qib_ibport *ibp = &ppd->ibport_data;
struct qib_qp *qp = NULL;
u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
u16 lid = be16_to_cpu(hdr->lrh[1]);
int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
u32 qp_num;
u32 opcode;
u32 psn;
int diff;
unsigned long flags;

/* Sanity check packet */
if (tlen < 24)
goto drop;

if (lid < QIB_MULTICAST_LID_BASE) {
lid &= ~((1 << ppd->lmc) - 1);
if (unlikely(lid != ppd->lid))
goto drop;
}

/* Check for GRH */
if (lnh == QIB_LRH_BTH)
ohdr = &hdr->u.oth;
else if (lnh == QIB_LRH_GRH) {
u32 vtf;

ohdr = &hdr->u.l.oth;
if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
goto drop;
vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
goto drop;
} else
goto drop;

/* Get opcode and PSN from packet */
opcode = be32_to_cpu(ohdr->bth[0]);
opcode >>= 24;
psn = be32_to_cpu(ohdr->bth[2]);

/* Get the destination QP number. */
qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
if (qp_num != QIB_MULTICAST_QPN) {
int ruc_res;
qp = qib_lookup_qpn(ibp, qp_num);
if (!qp)
goto drop;

/*
* Handle only RC QPs - for other QP types drop error
* packet.
*/
spin_lock(&qp->r_lock);

/* Check for valid receive state. */
if (!(ib_qib_state_ops[qp->state] &
QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto unlock;
}

switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
spin_lock_irqsave(&qp->s_lock, flags);
ruc_res =
qib_ruc_check_hdr(
ibp, hdr,
lnh == QIB_LRH_GRH,
qp,
be32_to_cpu(ohdr->bth[0]));
if (ruc_res) {
spin_unlock_irqrestore(&qp->s_lock,
flags);
goto unlock;
}
spin_unlock_irqrestore(&qp->s_lock, flags);

/* Only deal with RDMA Writes for now */
if (opcode <
IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
diff = qib_cmp24(psn, qp->r_psn);
if (!qp->r_nak_state && diff >= 0) {
ibp->n_rc_seqnak++;
qp->r_nak_state =
IB_NAK_PSN_ERROR;
/* Use the expected PSN. */
qp->r_ack_psn = qp->r_psn;
/*
* Wait to send the sequence
* NAK until all packets
* in the receive queue have
* been processed.
* Otherwise, we end up
* propagating congestion.
*/
if (list_empty(&qp->rspwait)) {
qp->r_flags |=
QIB_R_RSP_NAK;
atomic_inc(
&qp->refcount);
list_add_tail(
&qp->rspwait,
&rcd->qp_wait_list);
}
} /* Out of sequence NAK */
} /* QP Request NAKs */
break;
case IB_QPT_SMI:
case IB_QPT_GSI:
case IB_QPT_UD:
case IB_QPT_UC:
default:
/* For now don't handle any other QP types */
break;
}

unlock:
spin_unlock(&qp->r_lock);
/*
* Notify qib_destroy_qp() if it is waiting
* for us to finish.
*/
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
} /* Unicast QP */
} /* Valid packet with TIDErr */

drop:
return ret;
}

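The comments in the new qib_rcv_hdrerr() body above explain why the NAK is not sent on the spot: the QP is flagged with QIB_R_RSP_NAK, referenced, and parked on the context's qp_wait_list so the response goes out once the whole receive pass is done, instead of one NAK per bad packet. A toy user-space model of that defer-and-flush pattern, all names invented:

#include <stdio.h>

struct toy_qp {
        int id;
        int nak_pending;                /* stands in for QIB_R_RSP_NAK */
        struct toy_qp *next;            /* stands in for qp->rspwait */
};

static struct toy_qp *wait_list;        /* stands in for rcd->qp_wait_list */

static void schedule_nak(struct toy_qp *qp)
{
        if (qp->nak_pending)
                return;                 /* already queued once this pass */
        qp->nak_pending = 1;
        qp->next = wait_list;
        wait_list = qp;
}

static void flush_wait_list(void)
{
        while (wait_list) {
                struct toy_qp *qp = wait_list;

                wait_list = qp->next;
                qp->nak_pending = 0;
                printf("deferred NAK sent for QP %d\n", qp->id);
        }
}

int main(void)
{
        struct toy_qp a = { .id = 1 }, b = { .id = 2 };

        schedule_nak(&a);       /* several bad packets for the same QP ... */
        schedule_nak(&a);
        schedule_nak(&b);
        flush_wait_list();      /* ... still produce one NAK each, at the end */
        return 0;
}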
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
}

for (last = 0, i = 1; !last && i <= 64; i += !last) {
for (last = 0, i = 1; !last; i += !last) {
hdr = dd->f_get_msgheader(dd, rhf_addr);
eflags = qib_hdrget_err_flags(rhf_addr);
etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
* packets; only qibhdrerr should be set.
*/
if (unlikely(eflags))
crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
etail, rhf_addr, hdr);
else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
l += rsize;
if (l >= maxcnt)
l = 0;
if (i == QIB_MAX_PKT_RECV)
last = 1;

rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
if (dd->flags & QIB_NODMA_RTAIL) {
u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
*/
lval = l;
if (!last && !(i & 0xf)) {
dd->f_update_usrhead(rcd, lval, updegr, etail);
dd->f_update_usrhead(rcd, lval, updegr, etail, i);
updegr = 0;
}
}
@@ -444,7 +585,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
* if no packets were processed.
*/
lval = (u64)rcd->head | dd->rhdrhead_intr_off;
dd->f_update_usrhead(rcd, lval, updegr, etail);
dd->f_update_usrhead(rcd, lval, updegr, etail, i);
return crcs;
}

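The qib_kreceive() hunks above replace the hard-coded i <= 64 loop bound with a QIB_MAX_PKT_RECV check inside the body and pass the packet count along to f_update_usrhead. A standalone sketch of capping per-interrupt receive work, with invented names (poll_rx, struct toy_ring):

#include <stdio.h>

#define MAX_PKTS_PER_IRQ 64             /* plays the role of QIB_MAX_PKT_RECV */

struct toy_ring {
        unsigned pending;               /* packets waiting in this toy ring */
};

/* Consume one packet; return 0 once the ring is empty. */
static int process_one_packet(struct toy_ring *ring)
{
        if (!ring->pending)
                return 0;
        ring->pending--;
        return 1;
}

/* Handle at most MAX_PKTS_PER_IRQ packets per pass so one busy ring
 * cannot monopolize the CPU; the next interrupt picks up the rest. */
static unsigned poll_rx(struct toy_ring *ring)
{
        unsigned handled = 0;

        while (process_one_packet(ring)) {
                handled++;
                if (handled == MAX_PKTS_PER_IRQ)
                        break;
        }
        return handled;
}

int main(void)
{
        struct toy_ring ring = { .pending = 200 };
        unsigned handled = poll_rx(&ring);

        printf("handled %u packets, %u still queued\n", handled, ring.pending);
        return 0;
}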
10 changes: 5 additions & 5 deletions drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,25 +1379,25 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
/* find device (with ACTIVE ports) with fewest ctxts in use */
for (ndev = 0; ndev < devmax; ndev++) {
struct qib_devdata *dd = qib_lookup(ndev);
unsigned cused = 0, cfree = 0;
unsigned cused = 0, cfree = 0, pusable = 0;
if (!dd)
continue;
if (port && port <= dd->num_pports &&
usable(dd->pport + port - 1))
dusable = 1;
pusable = 1;
else
for (i = 0; i < dd->num_pports; i++)
if (usable(dd->pport + i))
dusable++;
if (!dusable)
pusable++;
if (!pusable)
continue;
for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
ctxt++)
if (dd->rcd[ctxt])
cused++;
else
cfree++;
if (cfree && cused < inuse) {
if (pusable && cfree && cused < inuse) {
udd = dd;
inuse = cused;
}
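The get_a_ctxt() fix above swaps dusable, which was declared outside the device loop and kept accumulating, for a per-device pusable that is also required in the final selection test. A small illustration of why such a flag has to be scoped to each iteration (toy values only):

#include <stdio.h>

int main(void)
{
        int ports_ok[] = { 0, 2, 0, 0 };        /* usable port counts per device */
        int any_usable_so_far = 0;              /* the old, accumulating flag */
        int i;

        for (i = 0; i < 4; i++) {
                int usable_here = ports_ok[i] > 0;      /* the per-device flag */

                any_usable_so_far |= usable_here;
                printf("device %d: per-device=%d accumulated=%d\n",
                       i, usable_here, any_usable_so_far);
        }
        /* devices 2 and 3 look usable by the accumulated flag even though
         * they have no usable ports: the kind of mix-up the rename avoids */
        return 0;
}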
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
}

static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
u32 updegr, u32 egrhd)
u32 updegr, u32 egrhd, u32 npkts)
{
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
4 changes: 2 additions & 2 deletions drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2297,7 +2297,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
nchipctxts = qib_read_kreg32(dd, kr_portcnt);
dd->cspec->numctxts = nchipctxts;
if (qib_n_krcv_queues > 1) {
dd->qpn_mask = 0x3f;
dd->qpn_mask = 0x3e;
dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
if (dd->first_user_ctxt > nchipctxts)
dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2703,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
}

static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
u32 updegr, u32 egrhd)
u32 updegr, u32 egrhd, u32 npkts)
{
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
