From cf30dfa37297e8726e0058d32f7b8c8a90e53d61 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 29 Jul 2010 15:56:37 +0000 Subject: [PATCH] --- yaml --- r: 206742 b: refs/heads/master c: 7a7008110b94dfaa90db4b0cc5b0c3f964c80506 h: refs/heads/master v: v3 --- [refs] | 2 +- trunk/drivers/infiniband/hw/qib/qib.h | 4 - trunk/drivers/infiniband/hw/qib/qib_common.h | 16 +- trunk/drivers/infiniband/hw/qib/qib_driver.c | 2 +- .../drivers/infiniband/hw/qib/qib_file_ops.c | 203 +++++++++--------- trunk/drivers/infiniband/hw/qib/qib_fs.c | 18 +- trunk/drivers/infiniband/hw/qib/qib_iba7322.c | 4 +- trunk/drivers/infiniband/hw/qib/qib_init.c | 2 +- trunk/drivers/infiniband/hw/qib/qib_qp.c | 2 +- trunk/drivers/infiniband/hw/qib/qib_rc.c | 47 ++-- trunk/drivers/infiniband/hw/qib/qib_sdma.c | 2 - trunk/drivers/infiniband/hw/qib/qib_sysfs.c | 21 +- trunk/drivers/infiniband/hw/qib/qib_tx.c | 2 +- trunk/drivers/infiniband/hw/qib/qib_uc.c | 6 + trunk/drivers/infiniband/hw/qib/qib_ud.c | 17 +- trunk/drivers/infiniband/hw/qib/qib_verbs.c | 7 +- trunk/drivers/infiniband/ulp/srp/ib_srp.c | 15 +- 17 files changed, 173 insertions(+), 197 deletions(-) diff --git a/[refs] b/[refs] index 861a11fd6867..523f1f90ed2a 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: ba818afdc62590e95e45d63be96954ea568925bf +refs/heads/master: 7a7008110b94dfaa90db4b0cc5b0c3f964c80506 diff --git a/trunk/drivers/infiniband/hw/qib/qib.h b/trunk/drivers/infiniband/hw/qib/qib.h index 61de0654820e..3593983df7ba 100644 --- a/trunk/drivers/infiniband/hw/qib/qib.h +++ b/trunk/drivers/infiniband/hw/qib/qib.h @@ -45,7 +45,6 @@ #include #include #include -#include #include #include #include @@ -327,9 +326,6 @@ struct qib_verbs_txreq { #define QIB_DEFAULT_MTU 4096 -/* max number of IB ports supported per HCA */ -#define QIB_MAX_IB_PORTS 2 - /* * Possible IB config parameters for f_get/set_ib_table() */ diff --git a/trunk/drivers/infiniband/hw/qib/qib_common.h b/trunk/drivers/infiniband/hw/qib/qib_common.h index 145da4040883..b3955ed8f794 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_common.h +++ b/trunk/drivers/infiniband/hw/qib/qib_common.h @@ -279,7 +279,7 @@ struct qib_base_info { * may not be implemented; the user code must deal with this if it * cares, or it must abort after initialization reports the difference. */ -#define QIB_USER_SWMINOR 11 +#define QIB_USER_SWMINOR 10 #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR) @@ -301,18 +301,6 @@ struct qib_base_info { */ #define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION) -/* - * If the unit is specified via open, HCA choice is fixed. If port is - * specified, it's also fixed. Otherwise we try to spread contexts - * across ports and HCAs, using different algorithims. WITHIN is - * the old default, prior to this mechanism. - */ -#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then - * ports; this is the default */ -#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin - * active ports within), then next HCA */ -#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */ - /* * This structure is passed to qib_userinit() to tell the driver where * user code buffers are, sizes, etc. 
The offsets and sizes of the @@ -331,7 +319,7 @@ struct qib_user_info { /* size of struct base_info to write to */ __u32 spu_base_info_size; - __u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused user minor < 11 */ + __u32 _spu_unused3; /* * If two or more processes wish to share a context, each process diff --git a/trunk/drivers/infiniband/hw/qib/qib_driver.c b/trunk/drivers/infiniband/hw/qib/qib_driver.c index 9cd193603fb1..f15ce076ac49 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_driver.c +++ b/trunk/drivers/infiniband/hw/qib/qib_driver.c @@ -335,7 +335,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ } - for (last = 0, i = 1; !last && i <= 64; i += !last) { + for (last = 0, i = 1; !last; i += !last) { hdr = dd->f_get_msgheader(dd, rhf_addr); eflags = qib_hdrget_err_flags(rhf_addr); etype = qib_hdrget_rcv_type(rhf_addr); diff --git a/trunk/drivers/infiniband/hw/qib/qib_file_ops.c b/trunk/drivers/infiniband/hw/qib/qib_file_ops.c index 6b11645edf35..a142a9eb5226 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/trunk/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1294,130 +1294,128 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, return ret; } -static inline int usable(struct qib_pportdata *ppd) +static inline int usable(struct qib_pportdata *ppd, int active_only) { struct qib_devdata *dd = ppd->dd; + u32 linkok = active_only ? QIBL_LINKACTIVE : + (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE); return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid && - (ppd->lflags & QIBL_LINKACTIVE); -} - -/* - * Select a context on the given device, either using a requested port - * or the port based on the context number. - */ -static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port, - const struct qib_user_info *uinfo) -{ - struct qib_pportdata *ppd = NULL; - int ret, ctxt; - - if (port) { - if (!usable(dd->pport + port - 1)) { - ret = -ENETDOWN; - goto done; - } else - ppd = dd->pport + port - 1; - } - for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt]; - ctxt++) - ; - if (ctxt == dd->cfgctxts) { - ret = -EBUSY; - goto done; - } - if (!ppd) { - u32 pidx = ctxt % dd->num_pports; - if (usable(dd->pport + pidx)) - ppd = dd->pport + pidx; - else { - for (pidx = 0; pidx < dd->num_pports && !ppd; - pidx++) - if (usable(dd->pport + pidx)) - ppd = dd->pport + pidx; - } - } - ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN; -done: - return ret; + (ppd->lflags & linkok); } static int find_free_ctxt(int unit, struct file *fp, const struct qib_user_info *uinfo) { struct qib_devdata *dd = qib_lookup(unit); + struct qib_pportdata *ppd = NULL; int ret; + u32 ctxt; - if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) + if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) { ret = -ENODEV; - else - ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo); + goto bail; + } + + /* + * If users requests specific port, only try that one port, else + * select "best" port below, based on context. 
+ */ + if (uinfo->spu_port) { + ppd = dd->pport + uinfo->spu_port - 1; + if (!usable(ppd, 0)) { + ret = -ENETDOWN; + goto bail; + } + } + + for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { + if (dd->rcd[ctxt]) + continue; + /* + * The setting and clearing of user context rcd[x] protected + * by the qib_mutex + */ + if (!ppd) { + /* choose port based on ctxt, if up, else 1st up */ + ppd = dd->pport + (ctxt % dd->num_pports); + if (!usable(ppd, 0)) { + int i; + for (i = 0; i < dd->num_pports; i++) { + ppd = dd->pport + i; + if (usable(ppd, 0)) + break; + } + if (i == dd->num_pports) { + ret = -ENETDOWN; + goto bail; + } + } + } + ret = setup_ctxt(ppd, ctxt, fp, uinfo); + goto bail; + } + ret = -EBUSY; +bail: return ret; } -static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, - unsigned alg) +static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo) { - struct qib_devdata *udd = NULL; - int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i; + struct qib_pportdata *ppd; + int ret = 0, devmax; + int npresent, nup; + int ndev; u32 port = uinfo->spu_port, ctxt; devmax = qib_count_units(&npresent, &nup); - if (!npresent) { - ret = -ENXIO; - goto done; - } - if (nup == 0) { - ret = -ENETDOWN; - goto done; - } - if (alg == QIB_PORT_ALG_ACROSS) { - unsigned inuse = ~0U; - /* find device (with ACTIVE ports) with fewest ctxts in use */ - for (ndev = 0; ndev < devmax; ndev++) { - struct qib_devdata *dd = qib_lookup(ndev); - unsigned cused = 0, cfree = 0; - if (!dd) - continue; - if (port && port <= dd->num_pports && - usable(dd->pport + port - 1)) - dusable = 1; - else - for (i = 0; i < dd->num_pports; i++) - if (usable(dd->pport + i)) - dusable++; - if (!dusable) + for (ndev = 0; ndev < devmax; ndev++) { + struct qib_devdata *dd = qib_lookup(ndev); + + /* device portion of usable() */ + if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase)) + continue; + for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { + if (dd->rcd[ctxt]) continue; - for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; - ctxt++) - if (dd->rcd[ctxt]) - cused++; - else - cfree++; - if (cfree && cused < inuse) { - udd = dd; - inuse = cused; + if (port) { + if (port > dd->num_pports) + continue; + ppd = dd->pport + port - 1; + if (!usable(ppd, 0)) + continue; + } else { + /* + * choose port based on ctxt, if up, else + * first port that's up for multi-port HCA + */ + ppd = dd->pport + (ctxt % dd->num_pports); + if (!usable(ppd, 0)) { + int j; + + ppd = NULL; + for (j = 0; j < dd->num_pports && + !ppd; j++) + if (usable(dd->pport + j, 0)) + ppd = dd->pport + j; + if (!ppd) + continue; /* to next unit */ + } } - } - if (udd) { - ret = choose_port_ctxt(fp, udd, port, uinfo); + ret = setup_ctxt(ppd, ctxt, fp, uinfo); goto done; } - } else { - for (ndev = 0; ndev < devmax; ndev++) { - struct qib_devdata *dd = qib_lookup(ndev); - if (dd) { - ret = choose_port_ctxt(fp, dd, port, uinfo); - if (!ret) - goto done; - if (ret == -EBUSY) - dusable++; - } - } } - ret = dusable ? 
-EBUSY : -ENETDOWN; + + if (npresent) { + if (nup == 0) + ret = -ENETDOWN; + else + ret = -EBUSY; + } else + ret = -ENXIO; done: return ret; @@ -1483,7 +1481,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) { int ret; int i_minor; - unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS; + unsigned swmajor, swminor; /* Check to be sure we haven't already initialized this file */ if (ctxt_fp(fp)) { @@ -1500,9 +1498,6 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) swminor = uinfo->spu_userversion & 0xffff; - if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT) - alg = uinfo->spu_port_alg; - mutex_lock(&qib_mutex); if (qib_compatible_subctxts(swmajor, swminor) && @@ -1519,7 +1514,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) if (i_minor) ret = find_free_ctxt(i_minor - 1, fp, uinfo); else - ret = get_a_ctxt(fp, uinfo, alg); + ret = get_a_ctxt(fp, uinfo); done_chk_sdma: if (!ret) { @@ -1867,7 +1862,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd) { int ret = 0; - if (!usable(rcd->ppd)) { + if (!usable(rcd->ppd, 1)) { int i; /* * if link is down, or otherwise not usable, delay @@ -1886,7 +1881,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd) set_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[i]); } - for (i = 0; !usable(rcd->ppd) && i < 300; i++) + for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++) msleep(100); ret = -ENETDOWN; } diff --git a/trunk/drivers/infiniband/hw/qib/qib_fs.c b/trunk/drivers/infiniband/hw/qib/qib_fs.c index 9f989c0ba9d3..844954bf417b 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_fs.c +++ b/trunk/drivers/infiniband/hw/qib/qib_fs.c @@ -135,8 +135,8 @@ static ssize_t driver_names_read(struct file *file, char __user *buf, } static const struct file_operations driver_ops[] = { - { .read = driver_stats_read, .llseek = generic_file_llseek, }, - { .read = driver_names_read, .llseek = generic_file_llseek, }, + { .read = driver_stats_read, }, + { .read = driver_names_read, }, }; /* read the per-device counters */ @@ -164,8 +164,8 @@ static ssize_t dev_names_read(struct file *file, char __user *buf, } static const struct file_operations cntr_ops[] = { - { .read = dev_counters_read, .llseek = generic_file_llseek, }, - { .read = dev_names_read, .llseek = generic_file_llseek, }, + { .read = dev_counters_read, }, + { .read = dev_names_read, }, }; /* @@ -210,9 +210,9 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf, } static const struct file_operations portcntr_ops[] = { - { .read = portnames_read, .llseek = generic_file_llseek, }, - { .read = portcntrs_1_read, .llseek = generic_file_llseek, }, - { .read = portcntrs_2_read, .llseek = generic_file_llseek, }, + { .read = portnames_read, }, + { .read = portcntrs_1_read, }, + { .read = portcntrs_2_read, }, }; /* @@ -261,8 +261,8 @@ static ssize_t qsfp_2_read(struct file *file, char __user *buf, } static const struct file_operations qsfp_ops[] = { - { .read = qsfp_1_read, .llseek = generic_file_llseek, }, - { .read = qsfp_2_read, .llseek = generic_file_llseek, }, + { .read = qsfp_1_read, }, + { .read = qsfp_2_read, }, }; static ssize_t flash_read(struct file *file, char __user *buf, diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c index 584d443b5335..5eedf83e2c3b 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5864,7 +5864,7 @@ static void 
write_7322_initregs(struct qib_devdata *dd) * Doesn't clear any of the error bits that might be set. */ val = TIDFLOW_ERRBITS; /* these are W1C */ - for (i = 0; i < dd->cfgctxts; i++) { + for (i = 0; i < dd->ctxtcnt; i++) { int flow; for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); @@ -7271,8 +7271,6 @@ static int serdes_7322_init(struct qib_pportdata *ppd) ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ data = qib_read_kreg_port(ppd, krp_serdesctrl); - /* Turn off IB latency mode */ - data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE); qib_write_kreg_port(ppd, krp_serdesctrl, data | SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); diff --git a/trunk/drivers/infiniband/hw/qib/qib_init.c b/trunk/drivers/infiniband/hw/qib/qib_init.c index f1d16d3a01f6..a873dd596e81 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_init.c +++ b/trunk/drivers/infiniband/hw/qib/qib_init.c @@ -93,7 +93,7 @@ unsigned long *qib_cpulist; void qib_set_ctxtcnt(struct qib_devdata *dd) { if (!qib_cfgctxts) - dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); + dd->cfgctxts = dd->ctxtcnt; else if (qib_cfgctxts < dd->num_pports) dd->cfgctxts = dd->ctxtcnt; else if (qib_cfgctxts <= dd->ctxtcnt) diff --git a/trunk/drivers/infiniband/hw/qib/qib_qp.c b/trunk/drivers/infiniband/hw/qib/qib_qp.c index 6c39851d2ded..e0f65e39076b 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_qp.c +++ b/trunk/drivers/infiniband/hw/qib/qib_qp.c @@ -450,7 +450,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) * * Flushes both send and receive work queues. * Returns true if last WQE event should be generated. - * The QP r_lock and s_lock should be held and interrupts disabled. + * The QP s_lock should be held and interrupts disabled. * If we are already in error state, just return. */ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) diff --git a/trunk/drivers/infiniband/hw/qib/qib_rc.c b/trunk/drivers/infiniband/hw/qib/qib_rc.c index a0931119bd78..40c0a373719c 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_rc.c +++ b/trunk/drivers/infiniband/hw/qib/qib_rc.c @@ -868,7 +868,7 @@ static void reset_psn(struct qib_qp *qp, u32 psn) /* * Back up requester to resend the last un-ACKed request. - * The QP r_lock and s_lock should be held and interrupts disabled. + * The QP s_lock should be held and interrupts disabled. */ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) { @@ -911,8 +911,7 @@ static void rc_timeout(unsigned long arg) struct qib_ibport *ibp; unsigned long flags; - spin_lock_irqsave(&qp->r_lock, flags); - spin_lock(&qp->s_lock); + spin_lock_irqsave(&qp->s_lock, flags); if (qp->s_flags & QIB_S_TIMER) { ibp = to_iport(qp->ibqp.device, qp->port_num); ibp->n_rc_timeouts++; @@ -921,8 +920,7 @@ static void rc_timeout(unsigned long arg) qib_restart_rc(qp, qp->s_last_psn + 1, 1); qib_schedule_send(qp); } - spin_unlock(&qp->s_lock); - spin_unlock_irqrestore(&qp->r_lock, flags); + spin_unlock_irqrestore(&qp->s_lock, flags); } /* @@ -1416,6 +1414,10 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, spin_lock_irqsave(&qp->s_lock, flags); + /* Double check we can process this now that we hold the s_lock. */ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto ack_done; + /* Ignore invalid responses. 
*/ if (qib_cmp24(psn, qp->s_next_psn) >= 0) goto ack_done; @@ -1659,6 +1661,9 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, ibp->n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); + /* Double check we can process this now that we hold the s_lock. */ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto unlock_done; for (i = qp->r_head_ack_queue; ; i = prev) { if (i == qp->s_tail_ack_queue) @@ -1873,6 +1878,9 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, psn = be32_to_cpu(ohdr->bth[2]); opcode >>= 24; + /* Prevent simultaneous processing after APM on different CPUs */ + spin_lock(&qp->r_lock); + /* * Process responses (ACKs) before anything else. Note that the * packet sequence number will be for something in the send work @@ -1883,14 +1891,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, opcode <= OP(ATOMIC_ACKNOWLEDGE)) { qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, hdrsize, pmtu, rcd); - return; + goto runlock; } /* Compute 24 bits worth of difference. */ diff = qib_cmp24(psn, qp->r_psn); if (unlikely(diff)) { if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) - return; + goto runlock; goto send_ack; } @@ -2082,6 +2090,9 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, if (next > QIB_MAX_RDMA_ATOMIC) next = 0; spin_lock_irqsave(&qp->s_lock, flags); + /* Double check we can process this while holding the s_lock. */ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto srunlock; if (unlikely(next == qp->s_tail_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlck; @@ -2135,7 +2146,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, qp->s_flags |= QIB_S_RESP_PENDING; qib_schedule_send(qp); - goto sunlock; + goto srunlock; } case OP(COMPARE_SWAP): @@ -2154,6 +2165,9 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, if (next > QIB_MAX_RDMA_ATOMIC) next = 0; spin_lock_irqsave(&qp->s_lock, flags); + /* Double check we can process this while holding the s_lock. */ + if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + goto srunlock; if (unlikely(next == qp->s_tail_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlck; @@ -2199,7 +2213,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, qp->s_flags |= QIB_S_RESP_PENDING; qib_schedule_send(qp); - goto sunlock; + goto srunlock; } default: @@ -2213,7 +2227,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, /* Send an ACK if requested or required. 
*/ if (psn & (1 << 31)) goto send_ack; - return; + goto runlock; rnr_nak: qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; @@ -2224,7 +2238,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } - return; + goto runlock; nack_op_err: qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); @@ -2236,7 +2250,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } - return; + goto runlock; nack_inv_unlck: spin_unlock_irqrestore(&qp->s_lock, flags); @@ -2250,7 +2264,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } - return; + goto runlock; nack_acc_unlck: spin_unlock_irqrestore(&qp->s_lock, flags); @@ -2260,6 +2274,13 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, qp->r_ack_psn = qp->r_psn; send_ack: qib_send_rc_ack(qp); +runlock: + spin_unlock(&qp->r_lock); + return; + +srunlock: + spin_unlock_irqrestore(&qp->s_lock, flags); + spin_unlock(&qp->r_lock); return; sunlock: diff --git a/trunk/drivers/infiniband/hw/qib/qib_sdma.c b/trunk/drivers/infiniband/hw/qib/qib_sdma.c index cad44491320b..b8456881f7f6 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_sdma.c +++ b/trunk/drivers/infiniband/hw/qib/qib_sdma.c @@ -656,7 +656,6 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd, } qp = tx->qp; qib_put_txreq(tx); - spin_lock(&qp->r_lock); spin_lock(&qp->s_lock); if (qp->ibqp.qp_type == IB_QPT_RC) { /* XXX what about error sending RDMA read responses? */ @@ -665,7 +664,6 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd, } else if (qp->s_wqe) qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); spin_unlock(&qp->s_lock); - spin_unlock(&qp->r_lock); /* return zero to process the next send work request */ goto unlock; diff --git a/trunk/drivers/infiniband/hw/qib/qib_sysfs.c b/trunk/drivers/infiniband/hw/qib/qib_sysfs.c index d50a33fe8bbc..dab4d9f4a2cc 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/trunk/drivers/infiniband/hw/qib/qib_sysfs.c @@ -347,7 +347,7 @@ static struct kobj_type qib_sl2vl_ktype = { #define QIB_DIAGC_ATTR(N) \ static struct qib_diagc_attr qib_diagc_attr_##N = { \ - .attr = { .name = __stringify(N), .mode = 0664 }, \ + .attr = { .name = __stringify(N), .mode = 0444 }, \ .counter = offsetof(struct qib_ibport, n_##N) \ } @@ -403,27 +403,8 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); } -static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t size) -{ - struct qib_diagc_attr *dattr = - container_of(attr, struct qib_diagc_attr, attr); - struct qib_pportdata *ppd = - container_of(kobj, struct qib_pportdata, diagc_kobj); - struct qib_ibport *qibp = &ppd->ibport_data; - char *endp; - long val = simple_strtol(buf, &endp, 0); - - if (val < 0 || endp == buf) - return -EINVAL; - - *(u32 *)((char *) qibp + dattr->counter) = val; - return size; -} - static const struct sysfs_ops qib_diagc_ops = { .show = diagc_attr_show, - .store = diagc_attr_store, }; static struct kobj_type qib_diagc_ktype = { diff --git a/trunk/drivers/infiniband/hw/qib/qib_tx.c b/trunk/drivers/infiniband/hw/qib/qib_tx.c index 7f36454c225e..af30232b6831 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_tx.c +++ b/trunk/drivers/infiniband/hw/qib/qib_tx.c @@ 
-170,7 +170,7 @@ static int find_ctxt(struct qib_devdata *dd, unsigned bufn) void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, unsigned cnt) { - struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS]; + struct qib_pportdata *ppd, *pppd[dd->num_pports]; unsigned i; unsigned long flags; diff --git a/trunk/drivers/infiniband/hw/qib/qib_uc.c b/trunk/drivers/infiniband/hw/qib/qib_uc.c index b9c8b6346c1b..6c7fe78cca64 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_uc.c +++ b/trunk/drivers/infiniband/hw/qib/qib_uc.c @@ -272,6 +272,9 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, opcode >>= 24; memset(&wc, 0, sizeof wc); + /* Prevent simultaneous processing after APM on different CPUs */ + spin_lock(&qp->r_lock); + /* Compare the PSN verses the expected PSN. */ if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { /* @@ -531,6 +534,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, } qp->r_psn++; qp->r_state = opcode; + spin_unlock(&qp->r_lock); return; rewind: @@ -538,10 +542,12 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, qp->r_sge.num_sge = 0; drop: ibp->n_pkt_drops++; + spin_unlock(&qp->r_lock); return; op_err: qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); + spin_unlock(&qp->r_lock); return; sunlock: diff --git a/trunk/drivers/infiniband/hw/qib/qib_ud.c b/trunk/drivers/infiniband/hw/qib/qib_ud.c index e1b3da2a1f85..c838cda73347 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_ud.c +++ b/trunk/drivers/infiniband/hw/qib/qib_ud.c @@ -534,6 +534,13 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, */ wc.byte_len = tlen + sizeof(struct ib_grh); + /* + * We need to serialize getting a receive work queue entry and + * generating a completion for it against QPs sending to this QP + * locally. + */ + spin_lock(&qp->r_lock); + /* * Get the next work request entry to find where to put the data. */ @@ -545,19 +552,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, ret = qib_get_rwqe(qp, 0); if (ret < 0) { qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); - return; + goto bail_unlock; } if (!ret) { if (qp->ibqp.qp_num == 0) ibp->n_vl15_dropped++; - return; + goto bail_unlock; } } /* Silently drop packets which are too big. */ if (unlikely(wc.byte_len > qp->r_len)) { qp->r_flags |= QIB_R_REUSE_SGE; ibp->n_pkt_drops++; - return; + goto bail_unlock; } if (has_grh) { qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, @@ -572,7 +579,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, qp->r_sge.sge = *qp->r_sge.sg_list++; } if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) - return; + goto bail_unlock; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = IB_WC_RECV; @@ -594,5 +601,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED)) != 0); +bail_unlock: + spin_unlock(&qp->r_lock); bail:; } diff --git a/trunk/drivers/infiniband/hw/qib/qib_verbs.c b/trunk/drivers/infiniband/hw/qib/qib_verbs.c index 9fab40488850..cda8f4173d23 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_verbs.c +++ b/trunk/drivers/infiniband/hw/qib/qib_verbs.c @@ -550,12 +550,10 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, { struct qib_ibport *ibp = &rcd->ppd->ibport_data; - spin_lock(&qp->r_lock); - /* Check for valid receive state. 
*/ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { ibp->n_pkt_drops++; - goto unlock; + return; } switch (qp->ibqp.qp_type) { @@ -579,9 +577,6 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, default: break; } - -unlock: - spin_unlock(&qp->r_lock); } /** diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c index ed3f9ebae882..4675defb374c 100644 --- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c +++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c @@ -879,21 +879,10 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) opcode = *(u8 *) iu->buf; if (0) { - int i; - shost_printk(KERN_ERR, target->scsi_host, PFX "recv completion, opcode 0x%02x\n", opcode); - - for (i = 0; i < wc->byte_len; ++i) { - if (i % 8 == 0) - printk(KERN_ERR " [%02x] ", i); - printk(" %02x", ((u8 *) iu->buf)[i]); - if ((i + 1) % 8 == 0) - printk("\n"); - } - - if (wc->byte_len % 8) - printk("\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, + iu->buf, wc->byte_len, true); } switch (opcode) {
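
Notes on the recurring patterns above, with illustrative sketches. The sketches are stand-in code written for this description, not excerpts from the qib or srp sources.

The context-allocation rework in qib_file_ops.c hinges on the new two-argument usable(): the allocation paths (find_free_ctxt(), get_a_ctxt()) pass active_only=0 and accept any link-up state, while disarm_req_delay() passes 1 to insist on a fully ACTIVE link. A minimal standalone sketch of that check, with made-up flag values in place of the real QIBL_* definitions and the device-presence checks omitted:

    #include <stdio.h>

    /* Stand-in link-state flags; the real QIBL_* values live in qib.h. */
    #define LINKINIT   0x1u
    #define LINKARMED  0x2u
    #define LINKACTIVE 0x4u

    struct port { unsigned lflags; unsigned lid; };

    /* active_only = 1: caller needs a fully ACTIVE link (disarm wait);
     * active_only = 0: any of INIT/ARMED/ACTIVE will do (ctxt alloc). */
    static int usable(const struct port *p, int active_only)
    {
            unsigned linkok = active_only ? LINKACTIVE :
                    (LINKINIT | LINKARMED | LINKACTIVE);
            return p->lid && (p->lflags & linkok) ? 1 : 0;
    }

    int main(void)
    {
            struct port armed = { LINKARMED, 1 };
            /* Prints "1 0": an ARMED link is usable for allocation
             * but not for the ACTIVE-only callers. */
            printf("%d %d\n", usable(&armed, 0), usable(&armed, 1));
            return 0;
    }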
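The qib_rc_rcv()/qib_uc_rcv()/qib_ud_rcv() hunks move the qp->r_lock acquisition out of the generic qib_qp_rcv() dispatcher (and out of qib_sdma_verbs_send()) into each protocol handler, so receive processing for a QP stays serialized across CPUs after APM while each handler picks its own unlock points; every exit path funnels through runlock/srunlock-style labels. A schematic userspace sketch of that goto-unlock pattern, with stand-in types and pthread mutexes in place of kernel spinlocks:

    #include <pthread.h>
    #include <stdio.h>

    struct qp {
            pthread_mutex_t r_lock;   /* serializes receive processing */
            pthread_mutex_t s_lock;   /* protects send-side state */
            int state_ok;
    };

    static void rcv(struct qp *qp, int need_send_state)
    {
            pthread_mutex_lock(&qp->r_lock);  /* taken once, at entry */

            if (need_send_state) {
                    pthread_mutex_lock(&qp->s_lock);
                    /* Double check state now that s_lock is held,
                     * mirroring the QIB_PROCESS_RECV_OK rechecks. */
                    if (!qp->state_ok)
                            goto srunlock;
                    /* ... queue a response ... */
                    goto srunlock;
            }
            /* ... normal receive work ... */
            goto runlock;

    srunlock:
            pthread_mutex_unlock(&qp->s_lock);
    runlock:
            pthread_mutex_unlock(&qp->r_lock);
    }

    int main(void)
    {
            struct qp qp = { PTHREAD_MUTEX_INITIALIZER,
                             PTHREAD_MUTEX_INITIALIZER, 1 };
            rcv(&qp, 1);
            rcv(&qp, 0);
            puts("ok");
            return 0;
    }

The same motivation explains why rc_timeout() can drop r_lock entirely and why the qib_error_qp()/qib_restart_rc() comments now require only s_lock.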
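The qib_tx.c hunk replaces the fixed pppd[QIB_MAX_IB_PORTS] bound (whose definition is removed from qib.h above) with a C99 variable-length array sized by dd->num_pports. A toy illustration of that idiom, with stand-in types:

    #include <stdio.h>

    struct port { int id; };
    struct dev { unsigned num_ports; struct port ports[2]; };

    static void walk(struct dev *dd)
    {
            /* VLA sized at run time from the device, instead of a
             * compile-time QIB_MAX_IB_PORTS-style ceiling. */
            struct port *seen[dd->num_ports];
            for (unsigned i = 0; i < dd->num_ports; i++)
                    seen[i] = &dd->ports[i];
            for (unsigned i = 0; i < dd->num_ports; i++)
                    printf("port %d\n", seen[i]->id);
    }

    int main(void)
    {
            struct dev dd = { 2, { { 0 }, { 1 } } };
            walk(&dd);
            return 0;
    }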
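In the ib_srp.c hunk, the hand-rolled hex loop becomes a single print_hex_dump() call: KERN_ERR level, empty prefix string, DUMP_PREFIX_OFFSET, 8 bytes per row in 1-byte groups, with the ASCII column enabled. For readers outside the kernel, a rough userspace approximation of that output format (hex_dump() here is a hypothetical helper written for this note, not a kernel API):

    #include <ctype.h>
    #include <stdio.h>

    static void hex_dump(const unsigned char *buf, size_t len)
    {
            for (size_t i = 0; i < len; i += 8) {
                    printf("%08zx: ", i);            /* DUMP_PREFIX_OFFSET */
                    for (size_t j = 0; j < 8; j++)
                            if (i + j < len)
                                    printf("%02x ", buf[i + j]);
                            else
                                    printf("   ");
                    printf(" ");
                    for (size_t j = 0; j < 8 && i + j < len; j++)
                            putchar(isprint(buf[i + j]) ?
                                    buf[i + j] : '.');  /* ascii = true */
                    putchar('\n');
            }
    }

    int main(void)
    {
            hex_dump((const unsigned char *)"SRP recv completion", 19);
            return 0;
    }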