From 8b1d284de8aa05b95504f076a26ba6a1c4a5c725 Mon Sep 17 00:00:00 2001
From: Sean Hefty
Date: Mon, 23 May 2011 17:52:46 -0700
Subject: [PATCH]

--- yaml ---
r: 272359
b: refs/heads/master
c: 59991f94eb32e954aa767f659eb642461e9e8b37
h: refs/heads/master
i:
  272357: 4dcdd3536a68d8f8793738d6be6794e0517700c3
  272355: 4074ad887968fd0c492a37f723f7ce7740d60e72
  272351: 6c74da58aebe74e8fd41d66c31c5f5bc3330fbaa
v: v3
---
 [refs]                                        |   2 +-
 trunk/drivers/infiniband/core/verbs.c         |  26 ++++
 trunk/drivers/infiniband/hw/qib/qib.h         |  15 +-
 trunk/drivers/infiniband/hw/qib/qib_driver.c  |  20 ++-
 .../drivers/infiniband/hw/qib/qib_file_ops.c  |   2 -
 trunk/drivers/infiniband/hw/qib/qib_iba6120.c |   2 -
 trunk/drivers/infiniband/hw/qib/qib_iba7220.c |   2 -
 trunk/drivers/infiniband/hw/qib/qib_iba7322.c | 135 ++++++------------
 trunk/drivers/infiniband/hw/qib/qib_init.c    |   8 +-
 trunk/drivers/infiniband/hw/qib/qib_qp.c      |  90 ++++--------
 trunk/drivers/infiniband/hw/qib/qib_qsfp.c    |  25 ++--
 trunk/drivers/infiniband/hw/qib/qib_qsfp.h    |   3 -
 trunk/drivers/infiniband/hw/qib/qib_rc.c      |  36 +++--
 trunk/drivers/infiniband/hw/qib/qib_ruc.c     |   7 +-
 trunk/drivers/infiniband/hw/qib/qib_sysfs.c   |   3 +-
 trunk/drivers/infiniband/hw/qib/qib_uc.c      |  25 ++--
 trunk/drivers/infiniband/hw/qib/qib_verbs.c   |  36 ++---
 trunk/drivers/infiniband/hw/qib/qib_verbs.h   |   5 +-
 trunk/include/rdma/ib_verbs.h                 |  22 +++
 19 files changed, 192 insertions(+), 272 deletions(-)

diff --git a/[refs] b/[refs]
index fb49f15780b6..1c7973c7b5b1 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 16d99812d58b8af2df29cd337a74cd965b53da04
+refs/heads/master: 59991f94eb32e954aa767f659eb642461e9e8b37
diff --git a/trunk/drivers/infiniband/core/verbs.c b/trunk/drivers/infiniband/core/verbs.c
index af7a8b08b2e9..5e03ab78fc8e 100644
--- a/trunk/drivers/infiniband/core/verbs.c
+++ b/trunk/drivers/infiniband/core/verbs.c
@@ -920,3 +920,29 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
     return qp->device->detach_mcast(qp, gid, lid);
 }
 EXPORT_SYMBOL(ib_detach_mcast);
+
+struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
+{
+    struct ib_xrcd *xrcd;
+
+    if (!device->alloc_xrcd)
+        return ERR_PTR(-ENOSYS);
+
+    xrcd = device->alloc_xrcd(device, NULL, NULL);
+    if (!IS_ERR(xrcd)) {
+        xrcd->device = device;
+        atomic_set(&xrcd->usecnt, 0);
+    }
+
+    return xrcd;
+}
+EXPORT_SYMBOL(ib_alloc_xrcd);
+
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+{
+    if (atomic_read(&xrcd->usecnt))
+        return -EBUSY;
+
+    return xrcd->device->dealloc_xrcd(xrcd);
+}
+EXPORT_SYMBOL(ib_dealloc_xrcd);
diff --git a/trunk/drivers/infiniband/hw/qib/qib.h b/trunk/drivers/infiniband/hw/qib/qib.h
index b881bdc401f5..c9624ea87209 100644
--- a/trunk/drivers/infiniband/hw/qib/qib.h
+++ b/trunk/drivers/infiniband/hw/qib/qib.h
@@ -171,9 +171,7 @@ struct qib_ctxtdata {
     /* how many alloc_pages() chunks in rcvegrbuf_pages */
     u32 rcvegrbuf_chunks;
     /* how many egrbufs per chunk */
-    u16 rcvegrbufs_perchunk;
-    /* ilog2 of above */
-    u16 rcvegrbufs_perchunk_shift;
+    u32 rcvegrbufs_perchunk;
     /* order for rcvegrbuf_pages */
     size_t rcvegrbuf_size;
     /* rcvhdrq size (for freeing) */
@@ -223,9 +221,6 @@ struct qib_ctxtdata {
     /* ctxt rcvhdrq head offset */
     u32 head;
     u32 pkt_count;
-    /* lookaside fields */
-    struct qib_qp *lookaside_qp;
-    u32 lookaside_qpn;
     /* QPs waiting for context processing */
     struct list_head qp_wait_list;
 };
@@ -812,10 +807,6 @@ struct qib_devdata {
      * supports, less gives more pio bufs/ctxt, etc.
      */
     u32 cfgctxts;
-    /*
-     * number of ctxts available for PSM open
-     */
-    u32 freectxts;
 
     /*
      * hint that we should update pioavailshadow before
@@ -945,9 +936,7 @@ struct qib_devdata {
     /* chip address space used by 4k pio buffers */
     u32 align4k;
     /* size of each rcvegrbuffer */
-    u16 rcvegrbufsize;
-    /* log2 of above */
-    u16 rcvegrbufsize_shift;
+    u32 rcvegrbufsize;
     /* localbus width (1, 2,4,8,16,32) from config space  */
     u32 lbus_width;
     /* localbus speed in MHz */
diff --git a/trunk/drivers/infiniband/hw/qib/qib_driver.c b/trunk/drivers/infiniband/hw/qib/qib_driver.c
index 9a9047f385ae..23e584f4c36c 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_driver.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_driver.c
@@ -279,10 +279,10 @@ int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
  */
 static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
 {
-    const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
-    const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);
+    const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
+    const u32 idx = etail % rcd->rcvegrbufs_perchunk;
 
-    return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
+    return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
 }
 
 /*
@@ -310,6 +310,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
     u32 opcode;
     u32 psn;
     int diff;
+    unsigned long flags;
 
     /* Sanity check packet */
     if (tlen < 24)
@@ -364,6 +365,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
 
         switch (qp->ibqp.qp_type) {
         case IB_QPT_RC:
+            spin_lock_irqsave(&qp->s_lock, flags);
             ruc_res =
                 qib_ruc_check_hdr(
                 ibp, hdr,
                 lnh == QIB_LRH_GRH,
                 qp,
                 be32_to_cpu(ohdr->bth[0]));
             if (ruc_res) {
+                spin_unlock_irqrestore(&qp->s_lock,
+                                       flags);
                 goto unlock;
             }
+            spin_unlock_irqrestore(&qp->s_lock, flags);
 
             /* Only deal with RDMA Writes for now */
             if (opcode <
@@ -542,15 +547,6 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
             updegr = 0;
         }
     }
-    /*
-     * Notify qib_destroy_qp() if it is waiting
-     * for lookaside_qp to finish.
-     */
-    if (rcd->lookaside_qp) {
-        if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
-            wake_up(&rcd->lookaside_qp->wait);
-        rcd->lookaside_qp = NULL;
-    }
 
     rcd->head = l;
     rcd->pkt_count += i;
diff --git a/trunk/drivers/infiniband/hw/qib/qib_file_ops.c b/trunk/drivers/infiniband/hw/qib/qib_file_ops.c
index 77633666f81c..26253039d2c7 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1284,7 +1284,6 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
     strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
     ctxt_fp(fp) = rcd;
     qib_stats.sps_ctxts++;
-    dd->freectxts++;
     ret = 0;
     goto bail;
 
@@ -1793,7 +1792,6 @@ static int qib_close(struct inode *in, struct file *fp)
         if (dd->pageshadow)
             unlock_expected_tids(rcd);
         qib_stats.sps_ctxts--;
-        dd->freectxts--;
     }
 
     mutex_unlock(&qib_mutex);
diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba6120.c b/trunk/drivers/infiniband/hw/qib/qib_iba6120.c
index 781a802a321f..d8ca0a0b970d 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3273,8 +3273,6 @@ static int init_6120_variables(struct qib_devdata *dd)
     /* we always allocate at least 2048 bytes for eager buffers */
     ret = ib_mtu_enum_to_int(qib_ibmtu);
     dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
-    BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
-    dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
 
     qib_6120_tidtemplate(dd);
 
diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7220.c b/trunk/drivers/infiniband/hw/qib/qib_iba7220.c
index 3f1d562ba898..e1f947446c2a 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4085,8 +4085,6 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
     /* we always allocate at least 2048 bytes for eager buffers */
     ret = ib_mtu_enum_to_int(qib_ibmtu);
     dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
-    BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
-    dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
 
     qib_7220_tidtemplate(dd);
 
diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c
index efd0a110091f..5ea9ece23b33 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2310,15 +2310,12 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
 
     val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
         QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-    ppd->cpspec->ibcctrl_a = val;
     /*
      * Reset the PCS interface to the serdes (and also ibc, which is still
      * in reset from above).  Writes new value of ibcctrl_a as last step.
      */
     qib_7322_mini_pcs_reset(ppd);
     qib_write_kreg(dd, kr_scratch, 0ULL);
-    /* clear the linkinit cmds */
-    ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
 
     if (!ppd->cpspec->ibcctrl_b) {
         unsigned lse = ppd->link_speed_enabled;
@@ -2390,6 +2387,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
     qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
     spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
 
+    /* Hold the link state machine for mezz boards */
+    if (IS_QMH(dd) || IS_QME(dd))
+        qib_set_ib_7322_lstate(ppd, 0,
+                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
     /* Also enable IBSTATUSCHG interrupt. */
     val = qib_read_kreg_port(ppd, krp_errmask);
     qib_write_kreg_port(ppd, krp_errmask,
@@ -2851,8 +2853,9 @@ static irqreturn_t qib_7322intr(int irq, void *data)
         for (i = 0; i < dd->first_user_ctxt; i++) {
             if (ctxtrbits & rmask) {
                 ctxtrbits &= ~rmask;
-                if (dd->rcd[i])
+                if (dd->rcd[i]) {
                     qib_kreceive(dd->rcd[i], NULL, &npkts);
+                }
             }
             rmask <<= 1;
         }
@@ -5227,8 +5230,6 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
                        QIBL_IB_AUTONEG_INPROG)))
             set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
         if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
-            struct qib_qsfp_data *qd =
-                &ppd->cpspec->qsfp_data;
             /* unlock the Tx settings, speed may change */
             qib_write_kreg_port(ppd, krp_tx_deemph_override,
                 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
@@ -5236,12 +5237,6 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
             qib_cancel_sends(ppd);
             /* on link down, ensure sane pcs state */
             qib_7322_mini_pcs_reset(ppd);
-            /* schedule the qsfp refresh which should turn the link
-               off */
-            if (ppd->dd->flags & QIB_HAS_QSFP) {
-                qd->t_insert = get_jiffies_64();
-                schedule_work(&qd->work);
-            }
             spin_lock_irqsave(&ppd->sdma_lock, flags);
             if (__qib_sdma_running(ppd))
                 __qib_sdma_process_event(ppd,
@@ -5592,79 +5587,43 @@ static void qsfp_7322_event(struct work_struct *work)
     struct qib_qsfp_data *qd;
     struct qib_pportdata *ppd;
     u64 pwrup;
-    unsigned long flags;
     int ret;
     u32 le2;
 
     qd = container_of(work, struct qib_qsfp_data, work);
     ppd = qd->ppd;
-    pwrup = qd->t_insert +
-        msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
-
-    /* Delay for 20 msecs to allow ModPrs resistor to setup */
-    mdelay(QSFP_MODPRS_LAG_MSEC);
-
-    if (!qib_qsfp_mod_present(ppd)) {
-        ppd->cpspec->qsfp_data.modpresent = 0;
-        /* Set the physical link to disabled */
-        qib_set_ib_7322_lstate(ppd, 0,
-                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-        spin_lock_irqsave(&ppd->lflags_lock, flags);
-        ppd->lflags &= ~QIBL_LINKV;
-        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-    } else {
-        /*
-         * Some QSFP's not only do not respond until the full power-up
-         * time, but may behave badly if we try. So hold off responding
-         * to insertion.
-         */
-        while (1) {
-            u64 now = get_jiffies_64();
-            if (time_after64(now, pwrup))
-                break;
-            msleep(20);
-        }
-
-        ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
+    pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
 
-        /*
-         * Need to change LE2 back to defaults if we couldn't
-         * read the cable type (to handle cable swaps), so do this
-         * even on failure to read cable information.  We don't
-         * get here for QME, so IS_QME check not needed here.
-         */
-        if (!ret && !ppd->dd->cspec->r1) {
-            if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
-                le2 = LE2_QME;
-            else if (qd->cache.atten[1] >= qib_long_atten &&
-                     QSFP_IS_CU(qd->cache.tech))
-                le2 = LE2_5m;
-            else
-                le2 = LE2_DEFAULT;
-        } else
-            le2 = LE2_DEFAULT;
-        ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
-        /*
-         * We always change parameteters, since we can choose
-         * values for cables without eeproms, and the cable may have
-         * changed from a cable with full or partial eeprom content
-         * to one with partial or no content.
-         */
-        init_txdds_table(ppd, 0);
-        /* The physical link is being re-enabled only when the
-         * previous state was DISABLED and the VALID bit is not
-         * set. This should only happen when the cable has been
-         * physically pulled. */
-        if (!ppd->cpspec->qsfp_data.modpresent &&
-            (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
-            ppd->cpspec->qsfp_data.modpresent = 1;
-            qib_set_ib_7322_lstate(ppd, 0,
-                                   QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
-            spin_lock_irqsave(&ppd->lflags_lock, flags);
-            ppd->lflags |= QIBL_LINKV;
-            spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-        }
+    /*
+     * Some QSFP's not only do not respond until the full power-up
+     * time, but may behave badly if we try. So hold off responding
+     * to insertion.
+     */
+    while (1) {
+        u64 now = get_jiffies_64();
+        if (time_after64(now, pwrup))
+            break;
+        msleep(20);
     }
+    ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
+    /*
+     * Need to change LE2 back to defaults if we couldn't
+     * read the cable type (to handle cable swaps), so do this
+     * even on failure to read cable information.  We don't
+     * get here for QME, so IS_QME check not needed here.
+     */
+    if (!ret && !ppd->dd->cspec->r1) {
+        if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
+            le2 = LE2_QME;
+        else if (qd->cache.atten[1] >= qib_long_atten &&
+                 QSFP_IS_CU(qd->cache.tech))
+            le2 = LE2_5m;
+        else
+            le2 = LE2_DEFAULT;
+    } else
+        le2 = LE2_DEFAULT;
+    ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
+    init_txdds_table(ppd, 0);
 }
 
 /*
@@ -5768,8 +5727,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
         /* now change the IBC and serdes, overriding generic */
         init_txdds_table(ppd, 1);
         /* Re-enable the physical state machine on mezz boards
-         * now that the correct settings have been set.
-         * QSFP boards are handles by the QSFP event handler */
+         * now that the correct settings have been set. */
         if (IS_QMH(dd) || IS_QME(dd))
             qib_set_ib_7322_lstate(ppd, 0,
                                    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
@@ -6247,8 +6205,6 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 
     /* we always allocate at least 2048 bytes for eager buffers */
     dd->rcvegrbufsize = max(mtu, 2048);
-    BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
-    dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
 
     qib_7322_tidtemplate(dd);
 
@@ -7191,8 +7147,7 @@ static void find_best_ent(struct qib_pportdata *ppd,
         }
     }
 
-    /* Active cables don't have attenuation so we only set SERDES
-     * settings to account for the attenuation of the board traces. */
+    /* Lookup serdes setting by cable type and attenuation */
     if (!override && QSFP_IS_ACTIVE(qd->tech)) {
         *sdr_dds = txdds_sdr + ppd->dd->board_atten;
         *ddr_dds = txdds_ddr + ppd->dd->board_atten;
@@ -7509,6 +7464,12 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
     u32 le_val, rxcaldone;
     int chan, chan_done = (1 << SERDES_CHANS) - 1;
 
+    /*
+     * Initialize the Tx DDS tables.  Also done every QSFP event,
+     * for adapters with QSFP
+     */
+    init_txdds_table(ppd, 0);
+
     /* Clear cmode-override, may be set from older driver */
     ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
 
@@ -7694,12 +7655,6 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
     /* VGA output common mode */
     ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
 
-    /*
-     * Initialize the Tx DDS tables.  Also done every QSFP event,
-     * for adapters with QSFP
-     */
-    init_txdds_table(ppd, 0);
-
     return 0;
 }
 
diff --git a/trunk/drivers/infiniband/hw/qib/qib_init.c b/trunk/drivers/infiniband/hw/qib/qib_init.c
index b093a0b53b2f..a01f3fce8eb3 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_init.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_init.c
@@ -183,9 +183,6 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
         rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
             rcd->rcvegrbufs_perchunk - 1) /
             rcd->rcvegrbufs_perchunk;
-        BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
-        rcd->rcvegrbufs_perchunk_shift =
-            ilog2(rcd->rcvegrbufs_perchunk);
     }
     return rcd;
 }
@@ -401,7 +398,6 @@ static void enable_chip(struct qib_devdata *dd)
         if (rcd)
             dd->f_rcvctrl(rcd->ppd, rcvmask, i);
     }
-    dd->freectxts = dd->cfgctxts - dd->first_user_ctxt;
 }
 
 static void verify_interrupt(unsigned long opaque)
@@ -585,6 +581,10 @@ int qib_init(struct qib_devdata *dd, int reinit)
             continue;
         }
 
+        /* let link come up, and enable IBC */
+        spin_lock_irqsave(&ppd->lflags_lock, flags);
+        ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
+        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
         portok++;
     }
 
diff --git a/trunk/drivers/infiniband/hw/qib/qib_qp.c b/trunk/drivers/infiniband/hw/qib/qib_qp.c
index 7e7e16fbee99..e16751f8639e 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_qp.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_qp.c
@@ -34,7 +34,6 @@
 
 #include <linux/err.h>
 #include <linux/vmalloc.h>
-#include <linux/jhash.h>
 
 #include "qib.h"
 
@@ -205,13 +204,6 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
         clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
 }
 
-static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
-{
-    return jhash_1word(qpn, dev->qp_rnd) &
-        (dev->qp_table_size - 1);
-}
-
-
 /*
  * Put the QP into the hash table.
  * The hash table holds a reference to the QP.
@@ -219,23 +211,22 @@ static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 {
     struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+    unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
     unsigned long flags;
-    unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
 
     spin_lock_irqsave(&dev->qpt_lock, flags);
-    atomic_inc(&qp->refcount);
 
     if (qp->ibqp.qp_num == 0)
-        rcu_assign_pointer(ibp->qp0, qp);
+        ibp->qp0 = qp;
     else if (qp->ibqp.qp_num == 1)
-        rcu_assign_pointer(ibp->qp1, qp);
+        ibp->qp1 = qp;
     else {
         qp->next = dev->qp_table[n];
-        rcu_assign_pointer(dev->qp_table[n], qp);
+        dev->qp_table[n] = qp;
     }
+    atomic_inc(&qp->refcount);
 
     spin_unlock_irqrestore(&dev->qpt_lock, flags);
-    synchronize_rcu();
 }
 
 /*
@@ -245,32 +236,29 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 {
     struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
-    unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
+    struct qib_qp *q, **qpp;
     unsigned long flags;
 
+    qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
+
     spin_lock_irqsave(&dev->qpt_lock, flags);
 
     if (ibp->qp0 == qp) {
+        ibp->qp0 = NULL;
         atomic_dec(&qp->refcount);
-        rcu_assign_pointer(ibp->qp0, NULL);
     } else if (ibp->qp1 == qp) {
+        ibp->qp1 = NULL;
         atomic_dec(&qp->refcount);
-        rcu_assign_pointer(ibp->qp1, NULL);
-    } else {
-        struct qib_qp *q, **qpp;
-
-        qpp = &dev->qp_table[n];
+    } else
         for (; (q = *qpp) != NULL; qpp = &q->next)
             if (q == qp) {
-                atomic_dec(&qp->refcount);
-                rcu_assign_pointer(*qpp, qp->next);
+                *qpp = qp->next;
                 qp->next = NULL;
+                atomic_dec(&qp->refcount);
                 break;
             }
-    }
 
     spin_unlock_irqrestore(&dev->qpt_lock, flags);
-    synchronize_rcu();
 }
 
 /**
@@ -292,24 +280,21 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
 
         if (!qib_mcast_tree_empty(ibp))
             qp_inuse++;
-        rcu_read_lock();
-        if (rcu_dereference(ibp->qp0))
+        if (ibp->qp0)
             qp_inuse++;
-        if (rcu_dereference(ibp->qp1))
+        if (ibp->qp1)
             qp_inuse++;
-        rcu_read_unlock();
     }
 
     spin_lock_irqsave(&dev->qpt_lock, flags);
     for (n = 0; n < dev->qp_table_size; n++) {
         qp = dev->qp_table[n];
-        rcu_assign_pointer(dev->qp_table[n], NULL);
+        dev->qp_table[n] = NULL;
 
         for (; qp; qp = qp->next)
             qp_inuse++;
     }
     spin_unlock_irqrestore(&dev->qpt_lock, flags);
-    synchronize_rcu();
 
     return qp_inuse;
 }
@@ -324,28 +309,25 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
  */
 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 {
-    struct qib_qp *qp = NULL;
+    struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+    unsigned long flags;
+    struct qib_qp *qp;
 
-    if (unlikely(qpn <= 1)) {
-        rcu_read_lock();
-        if (qpn == 0)
-            qp = rcu_dereference(ibp->qp0);
-        else
-            qp = rcu_dereference(ibp->qp1);
-    } else {
-        struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
-        unsigned n = qpn_hash(dev, qpn);
+    spin_lock_irqsave(&dev->qpt_lock, flags);
 
-        rcu_read_lock();
-        for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
+    if (qpn == 0)
+        qp = ibp->qp0;
+    else if (qpn == 1)
+        qp = ibp->qp1;
+    else
+        for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
+             qp = qp->next)
             if (qp->ibqp.qp_num == qpn)
                 break;
-    }
 
     if (qp)
-        if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
-            qp = NULL;
+        atomic_inc(&qp->refcount);
 
-    rcu_read_unlock();
+    spin_unlock_irqrestore(&dev->qpt_lock, flags);
     return qp;
 }
@@ -783,10 +765,8 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         }
     }
 
-    if (attr_mask & IB_QP_PATH_MTU) {
+    if (attr_mask & IB_QP_PATH_MTU)
         qp->path_mtu = pmtu;
-        qp->pmtu = ib_mtu_enum_to_int(pmtu);
-    }
 
     if (attr_mask & IB_QP_RETRY_CNT) {
         qp->s_retry_cnt = attr->retry_cnt;
@@ -801,12 +781,8 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
     if (attr_mask & IB_QP_MIN_RNR_TIMER)
         qp->r_min_rnr_timer = attr->min_rnr_timer;
 
-    if (attr_mask & IB_QP_TIMEOUT) {
+    if (attr_mask & IB_QP_TIMEOUT)
         qp->timeout = attr->timeout;
-        qp->timeout_jiffies =
-            usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
-                1000UL);
-    }
 
     if (attr_mask & IB_QP_QKEY)
         qp->qkey = attr->qkey;
@@ -1037,10 +1013,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
             ret = ERR_PTR(-ENOMEM);
             goto bail_swq;
         }
-        RCU_INIT_POINTER(qp->next, NULL);
-        qp->timeout_jiffies =
-            usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
-                1000UL);
         if (init_attr->srq)
             sz = 0;
         else {
diff --git a/trunk/drivers/infiniband/hw/qib/qib_qsfp.c b/trunk/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f1..3374a52232c1 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -273,12 +273,18 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
     int ret;
     int idx;
     u16 cks;
+    u32 mask;
     u8 peek[4];
 
     /* ensure sane contents on invalid reads, for cable swaps */
     memset(cp, 0, sizeof(*cp));
 
-    if (!qib_qsfp_mod_present(ppd)) {
+    mask = QSFP_GPIO_MOD_PRS_N;
+    if (ppd->hw_pidx)
+        mask <<= QSFP_GPIO_PORT2_SHIFT;
+
+    ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
+    if (ret & mask) {
         ret = -ENODEV;
         goto bail;
     }
@@ -438,19 +444,6 @@ const char * const qib_qsfp_devtech[16] = {
 
 static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
 
-int qib_qsfp_mod_present(struct qib_pportdata *ppd)
-{
-    u32 mask;
-    int ret;
-
-    mask = QSFP_GPIO_MOD_PRS_N <<
-        (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT);
-    ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
-
-    return !((ret & mask) >>
-        ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3));
-}
-
 /*
  * Initialize structures that control access to QSFP. Called once per port
  * on cards that support QSFP.
@@ -459,6 +452,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
                    void (*fevent)(struct work_struct *))
 {
     u32 mask, highs;
+    int pins;
 
     struct qib_devdata *dd = qd->ppd->dd;
 
@@ -486,7 +480,8 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
         mask <<= QSFP_GPIO_PORT2_SHIFT;
 
     /* Do not try to wait here. Better to let event handle it */
-    if (!qib_qsfp_mod_present(qd->ppd))
+    pins = dd->f_gpio_mod(dd, 0, 0, 0);
+    if (pins & mask)
         goto bail;
     /* We see a module, but it may be unwise to look yet. Just schedule */
     qd->t_insert = get_jiffies_64();
diff --git a/trunk/drivers/infiniband/hw/qib/qib_qsfp.h b/trunk/drivers/infiniband/hw/qib/qib_qsfp.h
index 46002a9417c0..c109bbdc90ac 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/trunk/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -34,7 +34,6 @@
 
 #define QSFP_DEV 0xA0
 #define QSFP_PWR_LAG_MSEC 2000
-#define QSFP_MODPRS_LAG_MSEC 20
 
 /*
  * Below are masks for various QSFP signals, for Port 1.
@@ -178,12 +177,10 @@ struct qib_qsfp_data {
     struct work_struct work;
     struct qib_qsfp_cache cache;
     u64 t_insert;
-    u8 modpresent;
 };
 
 extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
                                   struct qib_qsfp_cache *cp);
-extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
 extern void qib_qsfp_init(struct qib_qsfp_data *qd,
                           void (*fevent)(struct work_struct *));
 extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/trunk/drivers/infiniband/hw/qib/qib_rc.c b/trunk/drivers/infiniband/hw/qib/qib_rc.c
index afaf4ac79f42..eca0c41f1226 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_rc.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_rc.c
@@ -59,7 +59,8 @@ static void start_timer(struct qib_qp *qp)
     qp->s_flags |= QIB_S_TIMER;
     qp->s_timer.function = rc_timeout;
     /* 4.096 usec. * (1 << qp->timeout) */
-    qp->s_timer.expires = jiffies + qp->timeout_jiffies;
+    qp->s_timer.expires = jiffies +
+        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
     add_timer(&qp->s_timer);
 }
 
@@ -238,7 +239,7 @@ int qib_make_rc_req(struct qib_qp *qp)
     u32 len;
     u32 bth0;
     u32 bth2;
-    u32 pmtu = qp->pmtu;
+    u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
     char newreq;
     unsigned long flags;
     int ret = 0;
@@ -1518,7 +1519,9 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
          * 4.096 usec. * (1 << qp->timeout)
          */
         qp->s_flags |= QIB_S_TIMER;
-        mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
+        mod_timer(&qp->s_timer, jiffies +
+                  usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+                                   1000UL));
         if (qp->s_flags & QIB_S_WAIT_ACK) {
             qp->s_flags &= ~QIB_S_WAIT_ACK;
             qib_schedule_send(qp);
@@ -1729,7 +1732,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
          * same request.
          */
         offset = ((psn - e->psn) & QIB_PSN_MASK) *
-            qp->pmtu;
+            ib_mtu_enum_to_int(qp->path_mtu);
         len = be32_to_cpu(reth->length);
         if (unlikely(offset + len != e->rdma_sge.sge_length))
             goto unlock_done;
@@ -1873,7 +1876,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
     u32 psn;
     u32 pad;
     struct ib_wc wc;
-    u32 pmtu = qp->pmtu;
+    u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
     int diff;
     struct ib_reth *reth;
     unsigned long flags;
@@ -1889,8 +1892,10 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
     }
 
     opcode = be32_to_cpu(ohdr->bth[0]);
+    spin_lock_irqsave(&qp->s_lock, flags);
     if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
-        return;
+        goto sunlock;
+    spin_unlock_irqrestore(&qp->s_lock, flags);
 
     psn = be32_to_cpu(ohdr->bth[2]);
     opcode >>= 24;
@@ -1950,6 +1955,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
         break;
     }
 
+    memset(&wc, 0, sizeof wc);
+
     if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
         qp->r_flags |= QIB_R_COMM_EST;
         if (qp->ibqp.event_handler) {
@@ -2002,19 +2009,16 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
             goto rnr_nak;
         qp->r_rcv_len = 0;
         if (opcode == OP(SEND_ONLY))
-            goto no_immediate_data;
-        /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
+            goto send_last;
+        /* FALLTHROUGH */
     case OP(SEND_LAST_WITH_IMMEDIATE):
 send_last_imm:
         wc.ex.imm_data = ohdr->u.imm_data;
         hdrsize += 4;
         wc.wc_flags = IB_WC_WITH_IMM;
-        goto send_last;
+        /* FALLTHROUGH */
     case OP(SEND_LAST):
     case OP(RDMA_WRITE_LAST):
-no_immediate_data:
-        wc.wc_flags = 0;
-        wc.ex.imm_data = 0;
send_last:
         /* Get the number of bytes the message was padded by. */
         pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
@@ -2047,12 +2051,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
         wc.src_qp = qp->remote_qpn;
         wc.slid = qp->remote_ah_attr.dlid;
         wc.sl = qp->remote_ah_attr.sl;
-        /* zero fields that are N/A */
-        wc.vendor_err = 0;
-        wc.pkey_index = 0;
-        wc.dlid_path_bits = 0;
-        wc.port_num = 0;
-        wc.csum_ok = 0;
         /* Signal completion event if the solicited bit is set. */
         qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                      (ohdr->bth[0] &
@@ -2091,7 +2089,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
         if (opcode == OP(RDMA_WRITE_FIRST))
             goto send_middle;
         else if (opcode == OP(RDMA_WRITE_ONLY))
-            goto no_immediate_data;
+            goto send_last;
         ret = qib_get_rwqe(qp, 1);
         if (ret < 0)
             goto nack_op_err;
diff --git a/trunk/drivers/infiniband/hw/qib/qib_ruc.c b/trunk/drivers/infiniband/hw/qib/qib_ruc.c
index b4b37e47321a..eb78d9367f06 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_ruc.c
@@ -260,15 +260,12 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
 
 /*
  *
- * This should be called with the QP r_lock held.
- *
- * The s_lock will be acquired around the qib_migrate_qp() call.
+ * This should be called with the QP s_lock held.
  */
 int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                       int has_grh, struct qib_qp *qp, u32 bth0)
 {
     __be64 guid;
-    unsigned long flags;
 
     if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
         if (!has_grh) {
@@ -298,9 +295,7 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
         if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
             ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
             goto err;
-        spin_lock_irqsave(&qp->s_lock, flags);
         qib_migrate_qp(qp);
-        spin_unlock_irqrestore(&qp->s_lock, flags);
     } else {
         if (!has_grh) {
             if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
diff --git a/trunk/drivers/infiniband/hw/qib/qib_sysfs.c b/trunk/drivers/infiniband/hw/qib/qib_sysfs.c
index 78fbd56879d4..14d129de4320 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -515,7 +515,8 @@ static ssize_t show_nfreectxts(struct device *device,
     struct qib_devdata *dd = dd_from_dev(dev);
 
     /* Return the number of free user ports (contexts) available. */
-    return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
+    return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+        dd->first_user_ctxt - (u32)qib_stats.sps_ctxts);
 }
 
 static ssize_t show_serial(struct device *device,
diff --git a/trunk/drivers/infiniband/hw/qib/qib_uc.c b/trunk/drivers/infiniband/hw/qib/qib_uc.c
index 847e7afdfd94..32ccf3c824ca 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_uc.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_uc.c
@@ -51,7 +51,7 @@ int qib_make_uc_req(struct qib_qp *qp)
     u32 hwords;
     u32 bth0;
     u32 len;
-    u32 pmtu = qp->pmtu;
+    u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
     int ret = 0;
 
     spin_lock_irqsave(&qp->s_lock, flags);
@@ -243,12 +243,13 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
 {
     struct qib_other_headers *ohdr;
+    unsigned long flags;
     u32 opcode;
     u32 hdrsize;
     u32 psn;
     u32 pad;
     struct ib_wc wc;
-    u32 pmtu = qp->pmtu;
+    u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
     struct ib_reth *reth;
     int ret;
 
@@ -262,11 +263,14 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
     }
 
     opcode = be32_to_cpu(ohdr->bth[0]);
+    spin_lock_irqsave(&qp->s_lock, flags);
     if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
-        return;
+        goto sunlock;
+    spin_unlock_irqrestore(&qp->s_lock, flags);
 
     psn = be32_to_cpu(ohdr->bth[2]);
     opcode >>= 24;
+    memset(&wc, 0, sizeof wc);
 
     /* Compare the PSN verses the expected PSN. */
     if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
@@ -366,7 +370,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
         }
         qp->r_rcv_len = 0;
         if (opcode == OP(SEND_ONLY))
-            goto no_immediate_data;
+            goto send_last;
         else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
             goto send_last_imm;
         /* FALLTHROUGH */
@@ -385,11 +389,8 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
         wc.ex.imm_data = ohdr->u.imm_data;
         hdrsize += 4;
         wc.wc_flags = IB_WC_WITH_IMM;
-        goto send_last;
+        /* FALLTHROUGH */
     case OP(SEND_LAST):
-no_immediate_data:
-        wc.ex.imm_data = 0;
-        wc.wc_flags = 0;
send_last:
         /* Get the number of bytes the message was padded by. */
         pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
@@ -417,12 +418,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
         wc.src_qp = qp->remote_qpn;
         wc.slid = qp->remote_ah_attr.dlid;
         wc.sl = qp->remote_ah_attr.sl;
-        /* zero fields that are N/A */
-        wc.vendor_err = 0;
-        wc.pkey_index = 0;
-        wc.dlid_path_bits = 0;
-        wc.port_num = 0;
-        wc.csum_ok = 0;
         /* Signal completion event if the solicited bit is set. */
         qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                      (ohdr->bth[0] &
@@ -551,4 +546,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
     qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
     return;
 
+sunlock:
+    spin_unlock_irqrestore(&qp->s_lock, flags);
 }
diff --git a/trunk/drivers/infiniband/hw/qib/qib_verbs.c b/trunk/drivers/infiniband/hw/qib/qib_verbs.c
index 9627cb737125..9fab40488850 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_verbs.c
@@ -38,12 +38,11 @@
 #include <linux/utsname.h>
 #include <linux/rculist.h>
 #include <linux/mm.h>
-#include <linux/random.h>
 
 #include "qib.h"
 #include "qib_common.h"
 
-static unsigned int ib_qib_qp_table_size = 256;
+static unsigned int ib_qib_qp_table_size = 251;
 module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(qp_table_size, "QP table size");
 
@@ -660,25 +659,17 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
         if (atomic_dec_return(&mcast->refcount) <= 1)
             wake_up(&mcast->wait);
     } else {
-        if (rcd->lookaside_qp) {
-            if (rcd->lookaside_qpn != qp_num) {
-                if (atomic_dec_and_test(
-                    &rcd->lookaside_qp->refcount))
-                    wake_up(
-                     &rcd->lookaside_qp->wait);
-                rcd->lookaside_qp = NULL;
-            }
-        }
-        if (!rcd->lookaside_qp) {
-            qp = qib_lookup_qpn(ibp, qp_num);
-            if (!qp)
-                goto drop;
-            rcd->lookaside_qp = qp;
-            rcd->lookaside_qpn = qp_num;
-        } else
-            qp = rcd->lookaside_qp;
+        qp = qib_lookup_qpn(ibp, qp_num);
+        if (!qp)
+            goto drop;
         ibp->n_unicast_rcv++;
         qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
+        /*
+         * Notify qib_destroy_qp() if it is waiting
+         * for us to finish.
+         */
+        if (atomic_dec_and_test(&qp->refcount))
+            wake_up(&qp->wait);
     }
     return;
 
@@ -1983,8 +1974,6 @@ static void init_ibport(struct qib_pportdata *ppd)
     ibp->z_excessive_buffer_overrun_errors =
         cntrs.excessive_buffer_overrun_errors;
     ibp->z_vl15_dropped = cntrs.vl15_dropped;
-    RCU_INIT_POINTER(ibp->qp0, NULL);
-    RCU_INIT_POINTER(ibp->qp1, NULL);
 }
 
 /**
@@ -2001,15 +1990,12 @@ int qib_register_ib_device(struct qib_devdata *dd)
     int ret;
 
     dev->qp_table_size = ib_qib_qp_table_size;
-    get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
-    dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
+    dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
                             GFP_KERNEL);
     if (!dev->qp_table) {
         ret = -ENOMEM;
         goto err_qpt;
     }
-    for (i = 0; i < dev->qp_table_size; i++)
-        RCU_INIT_POINTER(dev->qp_table[i], NULL);
 
     for (i = 0; i < dd->num_pports; i++)
         init_ibport(ppd + i);
diff --git a/trunk/drivers/infiniband/hw/qib/qib_verbs.h b/trunk/drivers/infiniband/hw/qib/qib_verbs.h
index 0c19ef0c4123..95e5b47223b3 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/trunk/drivers/infiniband/hw/qib/qib_verbs.h
@@ -485,7 +485,6 @@ struct qib_qp {
     u8 alt_timeout;         /* Alternate path timeout for this QP */
     u8 port_num;
     enum ib_mtu path_mtu;
-    u32 pmtu;               /* decoded from path_mtu */
     u32 remote_qpn;
     u32 qkey;               /* QKEY for this QP (for UD or RD) */
     u32 s_size;             /* send work queue size */
@@ -496,7 +495,6 @@ struct qib_qp {
     u32 s_last;             /* last completed entry */
     u32 s_ssn;              /* SSN of tail entry */
     u32 s_lsn;              /* limit sequence number (credit) */
-    unsigned long timeout_jiffies; /* computed from timeout */
     struct qib_swqe *s_wq;  /* send work queue */
     struct qib_swqe *s_wqe;
     struct qib_rq r_rq;     /* receive work queue */
@@ -725,8 +723,7 @@ struct qib_ibdev {
     dma_addr_t pio_hdrs_phys;
     /* list of QPs waiting for RNR timer */
     spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
-    u32 qp_table_size;      /* size of the hash table */
-    u32 qp_rnd;             /* random bytes for hash */
+    unsigned qp_table_size; /* size of the hash table */
     spinlock_t qpt_lock;
 
     u32 n_piowait;
diff --git a/trunk/include/rdma/ib_verbs.h b/trunk/include/rdma/ib_verbs.h
index 228be3e220d9..d2a5c9b991d1 100644
--- a/trunk/include/rdma/ib_verbs.h
+++ b/trunk/include/rdma/ib_verbs.h
@@ -112,6 +112,7 @@ enum ib_device_cap_flags {
      */
     IB_DEVICE_UD_IP_CSUM            = (1<<18),
     IB_DEVICE_UD_TSO                = (1<<19),
+    IB_DEVICE_XRC                   = (1<<20),
     IB_DEVICE_MEM_MGT_EXTENSIONS    = (1<<21),
     IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
 };
@@ -858,6 +859,11 @@ struct ib_pd {
     atomic_t                usecnt; /* count all resources */
 };
 
+struct ib_xrcd {
+    struct ib_device       *device;
+    atomic_t                usecnt; /* count all resources */
+};
+
 struct ib_ah {
     struct ib_device        *device;
     struct ib_pd            *pd;
@@ -1149,6 +1155,10 @@ struct ib_device {
                                               struct ib_grh *in_grh,
                                               struct ib_mad *in_mad,
                                               struct ib_mad *out_mad);
+    struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
+                                             struct ib_ucontext *ucontext,
+                                             struct ib_udata *udata);
+    int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd);
 
     struct ib_dma_mapping_ops   *dma_ops;
 
@@ -2060,4 +2070,16 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
  */
 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 
+/**
+ * ib_alloc_xrcd - Allocates an XRC domain.
+ * @device: The device on which to allocate the XRC domain.
+ */
+struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
+
+/**
+ * ib_dealloc_xrcd - Deallocates an XRC domain.
+ * @xrcd: The XRC domain to deallocate.
+ */
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+
 #endif /* IB_VERBS_H */
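
Usage note for reviewers (illustrative only, not part of the diff): ib_alloc_xrcd() and ib_dealloc_xrcd() are the only entry points this patch adds to the core verbs API. The sketch below shows one plausible calling pattern from a kernel ULP. The helper my_enable_xrc() is hypothetical; ib_query_device(), the IB_DEVICE_XRC capability bit, and the error conventions (-ENOSYS when the HCA driver lacks alloc_xrcd, -EBUSY while xrcd->usecnt is nonzero) come from the patch and the existing verbs API.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical example: allocate and release an XRC domain on one device. */
static int my_enable_xrc(struct ib_device *device)
{
	struct ib_device_attr attr;
	struct ib_xrcd *xrcd;
	int ret;

	/* IB_DEVICE_XRC is the capability bit added by this patch. */
	ret = ib_query_device(device, &attr);
	if (ret)
		return ret;
	if (!(attr.device_cap_flags & IB_DEVICE_XRC))
		return -ENOSYS;

	/* Returns ERR_PTR(-ENOSYS) if the driver provides no alloc_xrcd method. */
	xrcd = ib_alloc_xrcd(device);
	if (IS_ERR(xrcd))
		return PTR_ERR(xrcd);

	/*
	 * Objects later attached to the domain are expected to bump
	 * xrcd->usecnt; ib_dealloc_xrcd() returns -EBUSY until that
	 * count drops back to zero.
	 */
	return ib_dealloc_xrcd(xrcd);
}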