diff --git a/[refs] b/[refs] index 612e5e4a3898..a5fbe97a3599 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 45c448a1c0bed9c7eab6064493a7c5d94defcafd +refs/heads/master: 60f2b652f54aa4ac4127a538abad05235fb9c469 diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 989ff1149390..8dca9d89c6c1 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -439,7 +439,7 @@ F: drivers/hwmon/ams/ AMSO1100 RNIC DRIVER M: Tom Tucker M: Steve Wise -L: linux-rdma@vger.kernel.org +L: general@lists.openfabrics.org S: Maintained F: drivers/infiniband/hw/amso1100/ @@ -1494,7 +1494,7 @@ F: drivers/net/cxgb3/ CXGB3 IWARP RNIC DRIVER (IW_CXGB3) M: Steve Wise -L: linux-rdma@vger.kernel.org +L: general@lists.openfabrics.org W: http://www.openfabrics.org S: Supported F: drivers/infiniband/hw/cxgb3/ @@ -1868,7 +1868,7 @@ F: fs/efs/ EHCA (IBM GX bus InfiniBand adapter) DRIVER M: Hoang-Nam Nguyen M: Christoph Raisch -L: linux-rdma@vger.kernel.org +L: general@lists.openfabrics.org S: Supported F: drivers/infiniband/hw/ehca/ @@ -2552,7 +2552,7 @@ INFINIBAND SUBSYSTEM M: Roland Dreier M: Sean Hefty M: Hal Rosenstock -L: linux-rdma@vger.kernel.org +L: general@lists.openfabrics.org (moderated for non-subscribers) W: http://www.openib.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git S: Supported @@ -2729,7 +2729,7 @@ F: drivers/net/ipg.c IPATH DRIVER M: Ralph Campbell -L: linux-rdma@vger.kernel.org +L: general@lists.openfabrics.org T: git git://git.qlogic.com/ipath-linux-2.6 S: Supported F: drivers/infiniband/hw/ipath/ @@ -3485,7 +3485,7 @@ F: drivers/scsi/NCR_D700.* NETEFFECT IWARP RNIC DRIVER (IW_NES) M: Faisal Latif M: Chien Tung -L: linux-rdma@vger.kernel.org +L: general@lists.openfabrics.org W: http://www.neteffect.com S: Supported F: drivers/infiniband/hw/nes/ diff --git a/trunk/drivers/infiniband/core/iwcm.c b/trunk/drivers/infiniband/core/iwcm.c index 55d093a36ae4..8f9509e1ebf7 100644 --- a/trunk/drivers/infiniband/core/iwcm.c +++ b/trunk/drivers/infiniband/core/iwcm.c @@ -362,7 +362,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id) * In either case, must tell the provider to reject. 
*/ cm_id_priv->state = IW_CM_STATE_DESTROYING; - cm_id->device->iwcm->reject(cm_id, NULL, 0); break; case IW_CM_STATE_CONN_SENT: case IW_CM_STATE_DESTROYING: diff --git a/trunk/drivers/infiniband/core/mad.c b/trunk/drivers/infiniband/core/mad.c index 5cef8f87b96b..de922a04ca2d 100644 --- a/trunk/drivers/infiniband/core/mad.c +++ b/trunk/drivers/infiniband/core/mad.c @@ -51,7 +51,8 @@ static struct list_head ib_mad_port_list; static u32 ib_mad_client_id = 0; /* Port list lock */ -static DEFINE_SPINLOCK(ib_mad_port_list_lock); +static spinlock_t ib_mad_port_list_lock; + /* Forward declarations */ static int method_in_use(struct ib_mad_mgmt_method_table **method, @@ -2983,6 +2984,8 @@ static int __init ib_mad_init_module(void) { int ret; + spin_lock_init(&ib_mad_port_list_lock); + ib_mad_cache = kmem_cache_create("ib_mad", sizeof(struct ib_mad_private), 0, @@ -3018,3 +3021,4 @@ static void __exit ib_mad_cleanup_module(void) module_init(ib_mad_init_module); module_exit(ib_mad_cleanup_module); + diff --git a/trunk/drivers/infiniband/core/multicast.c b/trunk/drivers/infiniband/core/multicast.c index 8d82ba171353..107f170c57cd 100644 --- a/trunk/drivers/infiniband/core/multicast.c +++ b/trunk/drivers/infiniband/core/multicast.c @@ -106,8 +106,6 @@ struct mcast_group { struct ib_sa_query *query; int query_id; u16 pkey_index; - u8 leave_state; - int retries; }; struct mcast_member { @@ -352,7 +350,6 @@ static int send_leave(struct mcast_group *group, u8 leave_state) rec = group->rec; rec.join_state = leave_state; - group->leave_state = leave_state; ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, port->port_num, IB_SA_METHOD_DELETE, &rec, @@ -545,11 +542,7 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec, { struct mcast_group *group = context; - if (status && group->retries > 0 && - !send_leave(group, group->leave_state)) - group->retries--; - else - mcast_work_handler(&group->work); + mcast_work_handler(&group->work); } static struct mcast_group *acquire_group(struct mcast_port *port, @@ -572,7 +565,6 @@ static struct mcast_group *acquire_group(struct mcast_port *port, if (!group) return NULL; - group->retries = 3; group->port = port; group->rec.mgid = *mgid; group->pkey_index = MCAST_INVALID_PKEY_INDEX; diff --git a/trunk/drivers/infiniband/core/sa_query.c b/trunk/drivers/infiniband/core/sa_query.c index 82543716d59e..1865049e80f7 100644 --- a/trunk/drivers/infiniband/core/sa_query.c +++ b/trunk/drivers/infiniband/core/sa_query.c @@ -109,10 +109,10 @@ static struct ib_client sa_client = { .remove = ib_sa_remove_one }; -static DEFINE_SPINLOCK(idr_lock); +static spinlock_t idr_lock; static DEFINE_IDR(query_idr); -static DEFINE_SPINLOCK(tid_lock); +static spinlock_t tid_lock; static u32 tid; #define PATH_REC_FIELD(field) \ @@ -1077,6 +1077,9 @@ static int __init ib_sa_init(void) { int ret; + spin_lock_init(&idr_lock); + spin_lock_init(&tid_lock); + get_random_bytes(&tid, sizeof tid); ret = ib_register_client(&sa_client); diff --git a/trunk/drivers/infiniband/core/smi.c b/trunk/drivers/infiniband/core/smi.c index 87236753bce9..5855e4405d9b 100644 --- a/trunk/drivers/infiniband/core/smi.c +++ b/trunk/drivers/infiniband/core/smi.c @@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, hop_cnt = smp->hop_cnt; /* See section 14.2.2.2, Vol 1 IB spec */ + /* C14-6 -- valid hop_cnt values are from 0 to 63 */ + if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) + return IB_SMI_DISCARD; + if (!ib_get_smp_direction(smp)) { /* C14-9:1 */ if (hop_cnt && hop_ptr == 0) 
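The mad.c and sa_query.c hunks above trade compile-time lock initialization (DEFINE_SPINLOCK) for a bare spinlock_t initialized at runtime from the module init path. A minimal sketch of the two idioms; the second lock and the init function are hypothetical:

#include <linux/module.h>
#include <linux/spinlock.h>

/* Compile-time: the lock is valid from the moment the object is loaded. */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Runtime: the lock must not be used before spin_lock_init() has run,
 * which is why the restored code calls it first thing in module init. */
static spinlock_t other_lock;

static int __init example_init(void)
{
        spin_lock_init(&other_lock);
        return 0;
}
module_init(example_init);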
{ @@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, hop_cnt = smp->hop_cnt; /* See section 14.2.2.2, Vol 1 IB spec */ + /* C14-6 -- valid hop_cnt values are from 0 to 63 */ + if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) + return IB_SMI_DISCARD; + if (!ib_get_smp_direction(smp)) { /* C14-9:1 -- sender should have incremented hop_ptr */ if (hop_cnt && hop_ptr == 0) diff --git a/trunk/drivers/infiniband/core/uverbs_main.c b/trunk/drivers/infiniband/core/uverbs_main.c index d3fff9e008a3..eb36a81dd09b 100644 --- a/trunk/drivers/infiniband/core/uverbs_main.c +++ b/trunk/drivers/infiniband/core/uverbs_main.c @@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr); DEFINE_IDR(ib_uverbs_qp_idr); DEFINE_IDR(ib_uverbs_srq_idr); -static DEFINE_SPINLOCK(map_lock); +static spinlock_t map_lock; static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES]; static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); @@ -584,16 +584,14 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, if (hdr.command < 0 || hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || - !uverbs_cmd_table[hdr.command]) + !uverbs_cmd_table[hdr.command] || + !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) return -EINVAL; if (!file->ucontext && hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) return -EINVAL; - if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) - return -ENOSYS; - return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr, hdr.in_words * 4, hdr.out_words * 4); } @@ -838,6 +836,8 @@ static int __init ib_uverbs_init(void) { int ret; + spin_lock_init(&map_lock); + ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES, "infiniband_verbs"); if (ret) { diff --git a/trunk/drivers/infiniband/hw/amso1100/c2.c b/trunk/drivers/infiniband/hw/amso1100/c2.c index 8250740c94b0..0cfbb6d2f762 100644 --- a/trunk/drivers/infiniband/hw/amso1100/c2.c +++ b/trunk/drivers/infiniband/hw/amso1100/c2.c @@ -86,7 +86,11 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table); static void c2_print_macaddr(struct net_device *netdev) { - pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq); + pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, " + "IRQ %u\n", netdev->name, + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5], + netdev->irq); } static void c2_set_rxbufsize(struct c2_port *c2_port) diff --git a/trunk/drivers/infiniband/hw/amso1100/c2_provider.c b/trunk/drivers/infiniband/hw/amso1100/c2_provider.c index ad723bd8bf49..f1948fad85d7 100644 --- a/trunk/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/trunk/drivers/infiniband/hw/amso1100/c2_provider.c @@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev) /* Register pseudo network device */ dev->pseudo_netdev = c2_pseudo_netdev_init(dev); if (!dev->pseudo_netdev) - goto out; + goto out3; ret = register_netdev(dev->pseudo_netdev); if (ret) - goto out_free_netdev; + goto out2; pr_debug("%s:%u\n", __func__, __LINE__); strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); @@ -851,10 +851,6 @@ int c2_register_device(struct c2_dev *dev) dev->ibdev.post_recv = c2_post_receive; dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL); - if (dev->ibdev.iwcm == NULL) { - ret = -ENOMEM; - goto out_unregister_netdev; - } dev->ibdev.iwcm->add_ref = c2_add_ref; dev->ibdev.iwcm->rem_ref = c2_rem_ref; dev->ibdev.iwcm->get_qp = c2_get_qp; @@ -866,25 +862,23 @@ int c2_register_device(struct c2_dev *dev) ret 
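The c2.c hunk above replaces the kernel's %pM printf extension with six explicit %02X conversions. A small sketch showing the two forms side by side (%pM prints the six bytes lowercase and colon-separated); the helper name is hypothetical:

#include <linux/kernel.h>
#include <linux/types.h>

static void print_mac_both_ways(const u8 *addr)
{
        /* Extension form: one conversion for the whole 6-byte address. */
        pr_debug("MAC %pM\n", addr);

        /* Open-coded form restored by this revert: */
        pr_debug("MAC %02X:%02X:%02X:%02X:%02X:%02X\n",
                 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
}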
= ib_register_device(&dev->ibdev); if (ret) - goto out_free_iwcm; + goto out1; for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) { ret = device_create_file(&dev->ibdev.dev, c2_dev_attributes[i]); if (ret) - goto out_unregister_ibdev; + goto out0; } - goto out; + goto out3; -out_unregister_ibdev: +out0: ib_unregister_device(&dev->ibdev); -out_free_iwcm: - kfree(dev->ibdev.iwcm); -out_unregister_netdev: +out1: unregister_netdev(dev->pseudo_netdev); -out_free_netdev: +out2: free_netdev(dev->pseudo_netdev); -out: +out3: pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret); return ret; } diff --git a/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.c b/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.c index 72ed3396b721..62f9cf2f94ec 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/trunk/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -852,9 +852,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) wqe->qpcaps = attr->qpcaps; wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss); wqe->rqe_count = cpu_to_be16(attr->rqe_count); - wqe->flags_rtr_type = cpu_to_be16(attr->flags | - V_RTR_TYPE(attr->rtr_type) | - V_CHAN(attr->chan)); + wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type)); wqe->ord = cpu_to_be32(attr->ord); wqe->ird = cpu_to_be32(attr->ird); wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr); @@ -1034,7 +1032,6 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p) err2: cxio_hal_destroy_ctrl_qp(rdev_p); err1: - rdev_p->t3cdev_p->ulp = NULL; list_del(&rdev_p->entry); return err; } diff --git a/trunk/drivers/infiniband/hw/cxgb3/cxio_wr.h b/trunk/drivers/infiniband/hw/cxgb3/cxio_wr.h index a197a5b7ac7f..32e3b1461d81 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/trunk/drivers/infiniband/hw/cxgb3/cxio_wr.h @@ -327,11 +327,6 @@ enum rdma_init_rtr_types { #define V_RTR_TYPE(x) ((x) << S_RTR_TYPE) #define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE) -#define S_CHAN 4 -#define M_CHAN 0x3 -#define V_CHAN(x) ((x) << S_CHAN) -#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN) - struct t3_rdma_init_attr { u32 tid; u32 qpid; @@ -351,7 +346,6 @@ struct t3_rdma_init_attr { u16 flags; u16 rqe_count; u32 irs; - u32 chan; }; struct t3_rdma_init_wr { diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch.c b/trunk/drivers/infiniband/hw/cxgb3/iwch.c index b0ea0105ddf6..26fc0a4eaa74 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch.c @@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS]; static void open_rnic_dev(struct t3cdev *); static void close_rnic_dev(struct t3cdev *); -static void iwch_event_handler(struct t3cdev *, u32, u32); +static void iwch_err_handler(struct t3cdev *, u32, u32); struct cxgb3_client t3c_client = { .name = "iw_cxgb3", @@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = { .remove = close_rnic_dev, .handlers = t3c_handlers, .redirect = iwch_ep_redirect, - .event_handler = iwch_event_handler + .err_handler = iwch_err_handler }; static LIST_HEAD(dev_list); @@ -105,9 +105,11 @@ static void rnic_init(struct iwch_dev *rnicp) static void open_rnic_dev(struct t3cdev *tdev) { struct iwch_dev *rnicp; + static int vers_printed; PDBG("%s t3cdev %p\n", __func__, tdev); - printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n", + if (!vers_printed++) + printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION); rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp)); if (!rnicp) { @@ -160,36 +162,21 @@ static void close_rnic_dev(struct t3cdev *tdev) 
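c2_register_device() above uses the kernel's goto-unwind idiom for its error paths; the revert renames the descriptive labels back to out0..out3 and also drops the NULL check after the kmalloc() of dev->ibdev.iwcm. A minimal sketch of the idiom itself, with hypothetical helpers standing in for register_netdev() and friends:

static int alloc_first(void)  { return 0; }
static int alloc_second(void) { return 0; }
static void free_first(void)  { }

static int example_register(void)
{
        int ret;

        ret = alloc_first();
        if (ret)
                goto out;
        ret = alloc_second();
        if (ret)
                goto out_free_first;
        return 0;

out_free_first:
        free_first();           /* undo in reverse order of setup */
out:
        return ret;
}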
mutex_unlock(&dev_mutex); } -static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id) +static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) { struct cxio_rdev *rdev = tdev->ulp; - struct iwch_dev *rnicp; + struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev); struct ib_event event; - u32 portnum = port_id + 1; - if (!rdev) - return; - rnicp = rdev_to_iwch_dev(rdev); - switch (evt) { - case OFFLOAD_STATUS_DOWN: { + if (status == OFFLOAD_STATUS_DOWN) { rdev->flags = CXIO_ERROR_FATAL; + + event.device = &rnicp->ibdev; event.event = IB_EVENT_DEVICE_FATAL; - break; - } - case OFFLOAD_PORT_DOWN: { - event.event = IB_EVENT_PORT_ERR; - break; - } - case OFFLOAD_PORT_UP: { - event.event = IB_EVENT_PORT_ACTIVE; - break; - } + event.element.port_num = 0; + ib_dispatch_event(&event); } - event.device = &rnicp->ibdev; - event.element.port_num = portnum; - ib_dispatch_event(&event); - return; } diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c index 66b41351910a..52d7bb0c2a12 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -286,7 +286,7 @@ void __free_ep(struct kref *kref) ep = container_of(container_of(kref, struct iwch_ep_common, kref), struct iwch_ep, com); PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); - if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { + if (ep->com.flags & RELEASE_RESOURCES) { cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); dst_release(ep->dst); l2t_release(L2DATA(ep->com.tdev), ep->l2t); @@ -297,7 +297,7 @@ void __free_ep(struct kref *kref) static void release_ep_resources(struct iwch_ep *ep) { PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); - set_bit(RELEASE_RESOURCES, &ep->com.flags); + ep->com.flags |= RELEASE_RESOURCES; put_ep(&ep->com); } @@ -786,12 +786,10 @@ static void connect_request_upcall(struct iwch_ep *ep) event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); event.provider_data = ep; - if (state_read(&ep->parent_ep->com) != DEAD) { - get_ep(&ep->com); + if (state_read(&ep->parent_ep->com) != DEAD) ep->parent_ep->com.cm_id->event_handler( ep->parent_ep->com.cm_id, &event); - } put_ep(&ep->parent_ep->com); ep->parent_ep = NULL; } @@ -1158,7 +1156,8 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) * We get 2 abort replies from the HW. The first one must * be ignored except for scribbling that we need one more. */ - if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) { + if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) { + ep->com.flags |= ABORT_REQ_IN_PROGRESS; return CPL_RET_BUF_DONE; } @@ -1478,14 +1477,10 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) /* * We're gonna mark this puppy DEAD, but keep * the reference on it until the ULP accepts or - * rejects the CR. Also wake up anyone waiting - * in rdma connection migration (see iwch_accept_cr()). + * rejects the CR. */ __state_set(&ep->com, CLOSING); - ep->com.rpl_done = 1; - ep->com.rpl_err = -ECONNRESET; - PDBG("waking up ep %p\n", ep); - wake_up(&ep->com.waitq); + get_ep(&ep->com); break; case MPA_REP_SENT: __state_set(&ep->com, CLOSING); @@ -1566,7 +1561,8 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) * We get 2 peer aborts from the HW. The first one must * be ignored except for scribbling that we need one more. 
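The iwch_cm.c hunks here replace atomic bitops on an unsigned long flags word (test_bit/set_bit/test_and_set_bit) with plain reads and read-modify-writes of a u32, which are safe only if every caller is already serialized. A sketch of the atomic idiom being removed; the flag value matches the driver's bit number, the helper is hypothetical:

#include <linux/bitops.h>
#include <linux/types.h>

#define ABORT_REQ_IN_PROGRESS   1       /* a bit number, not a mask */

/* Returns true exactly once even if two CPUs race here: the old bit
 * value is fetched and the bit is set in a single atomic operation. */
static bool first_abort_reply(unsigned long *flags)
{
        return !test_and_set_bit(ABORT_REQ_IN_PROGRESS, flags);
}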
*/ - if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) { + if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) { + ep->com.flags |= PEER_ABORT_IN_PROGRESS; return CPL_RET_BUF_DONE; } @@ -1593,13 +1589,9 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) /* * We're gonna mark this puppy DEAD, but keep * the reference on it until the ULP accepts or - * rejects the CR. Also wake up anyone waiting - * in rdma connection migration (see iwch_accept_cr()). + * rejects the CR. */ - ep->com.rpl_done = 1; - ep->com.rpl_err = -ECONNRESET; - PDBG("waking up ep %p\n", ep); - wake_up(&ep->com.waitq); + get_ep(&ep->com); break; case MORIBUND: case CLOSING: @@ -1805,7 +1797,6 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) err = send_mpa_reject(ep, pdata, pdata_len); err = iwch_ep_disconnect(ep, 0, GFP_KERNEL); } - put_ep(&ep->com); return 0; } @@ -1819,10 +1810,8 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct iwch_qp *qp = get_qhp(h, conn_param->qpn); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); - if (state_read(&ep->com) == DEAD) { - err = -ECONNRESET; - goto err; - } + if (state_read(&ep->com) == DEAD) + return -ECONNRESET; BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); BUG_ON(!qp); @@ -1830,14 +1819,15 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { abort_connection(ep, NULL, GFP_KERNEL); - err = -EINVAL; - goto err; + return -EINVAL; } cm_id->add_ref(cm_id); ep->com.cm_id = cm_id; ep->com.qp = qp; + ep->com.rpl_done = 0; + ep->com.rpl_err = 0; ep->ird = conn_param->ird; ep->ord = conn_param->ord; @@ -1846,6 +1836,8 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); + get_ep(&ep->com); + /* bind QP to EP and move to RTS */ attrs.mpa_attr = ep->mpa_attr; attrs.max_ird = ep->ird; @@ -1863,31 +1855,30 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) err = iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); if (err) - goto err1; + goto err; /* if needed, wait for wr_ack */ if (iwch_rqes_posted(qp)) { wait_event(ep->com.waitq, ep->com.rpl_done); err = ep->com.rpl_err; if (err) - goto err1; + goto err; } err = send_mpa_reply(ep, conn_param->private_data, conn_param->private_data_len); if (err) - goto err1; + goto err; state_set(&ep->com, FPDU_MODE); established_upcall(ep); put_ep(&ep->com); return 0; -err1: +err: ep->com.cm_id = NULL; ep->com.qp = NULL; cm_id->rem_ref(cm_id); -err: put_ep(&ep->com); return err; } @@ -2106,17 +2097,14 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) ep->com.state = CLOSING; start_ep_timer(ep); } - set_bit(CLOSE_SENT, &ep->com.flags); break; case CLOSING: - if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { - close = 1; - if (abrupt) { - stop_ep_timer(ep); - ep->com.state = ABORTING; - } else - ep->com.state = MORIBUND; - } + close = 1; + if (abrupt) { + stop_ep_timer(ep); + ep->com.state = ABORTING; + } else + ep->com.state = MORIBUND; break; case MORIBUND: case ABORTING: diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.h b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.h index b9efadfffb4f..43c0aea7eadc 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.h +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.h @@ -145,10 +145,9 @@ enum 
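iwch_accept_cr() above manages endpoint lifetime with get_ep()/put_ep(), which wrap a struct kref embedded in iwch_ep_common. A minimal sketch of that pin-for-the-duration pattern; the types and the accept body are illustrative, not the driver's:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct endpoint {
        struct kref kref;
};

static void ep_release(struct kref *kref)
{
        kfree(container_of(kref, struct endpoint, kref));
}

static void handle_accept(struct endpoint *ep)
{
        kref_get(&ep->kref);    /* pin ep across the blocking work */
        /* ... bind the QP, send the MPA reply, wait for the ack ... */
        kref_put(&ep->kref, ep_release);
}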
iwch_ep_state { }; enum iwch_ep_flags { - PEER_ABORT_IN_PROGRESS = 0, - ABORT_REQ_IN_PROGRESS = 1, - RELEASE_RESOURCES = 2, - CLOSE_SENT = 3, + PEER_ABORT_IN_PROGRESS = (1 << 0), + ABORT_REQ_IN_PROGRESS = (1 << 1), + RELEASE_RESOURCES = (1 << 2), }; struct iwch_ep_common { @@ -163,7 +162,7 @@ struct iwch_ep_common { wait_queue_head_t waitq; int rpl_done; int rpl_err; - unsigned long flags; + u32 flags; }; struct iwch_listen_ep { diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_mem.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_mem.c index e1ec65ebb016..ec49a5cbdebb 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_mem.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_mem.c @@ -39,7 +39,7 @@ #include "iwch.h" #include "iwch_provider.h" -static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) +static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) { u32 mmid; @@ -47,15 +47,14 @@ static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) mhp->attr.stag = stag; mmid = stag >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; + insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); - return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); } int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, struct iwch_mr *mhp, int shift) { u32 stag; - int ret; if (cxio_register_phys_mem(&rhp->rdev, &stag, mhp->attr.pdid, @@ -67,11 +66,9 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, mhp->attr.pbl_size, mhp->attr.pbl_addr)) return -ENOMEM; - ret = iwch_finish_mem_reg(mhp, stag); - if (ret) - cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, - mhp->attr.pbl_addr); - return ret; + iwch_finish_mem_reg(mhp, stag); + + return 0; } int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, @@ -80,7 +77,6 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, int npages) { u32 stag; - int ret; /* We could support this... 
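The iwch_cm.h hunk above switches the endpoint flags from bit numbers (what the atomic bitops expect) back to bit masks (what plain |= and & expect). Mixing the two conventions is a classic bug: calling set_bit(RELEASE_RESOURCES, ...) when RELEASE_RESOURCES is the mask (1 << 2) would touch bit 4, not bit 2. A sketch of the two encodings with hypothetical names:

/* Bit numbers: for set_bit(), test_bit(), test_and_set_bit(). */
enum ep_flag_bits {
        PEER_ABORT_BIT  = 0,
        RELEASE_BIT     = 2,
};

/* Bit masks: for flags |= X and flags & X. */
enum ep_flag_masks {
        PEER_ABORT_MASK = 1 << 0,
        RELEASE_MASK    = 1 << 2,
};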
*/ if (npages > mhp->attr.pbl_size) @@ -97,12 +93,9 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, mhp->attr.pbl_size, mhp->attr.pbl_addr)) return -ENOMEM; - ret = iwch_finish_mem_reg(mhp, stag); - if (ret) - cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, - mhp->attr.pbl_addr); + iwch_finish_mem_reg(mhp, stag); - return ret; + return 0; } int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c index 6895523779d0..e2a63214008a 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -195,11 +195,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve spin_lock_init(&chp->lock); atomic_set(&chp->refcnt, 1); init_waitqueue_head(&chp->wait); - if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { - cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); - kfree(chp); - return ERR_PTR(-ENOMEM); - } + insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); if (ucontext) { struct iwch_mm_entry *mm; @@ -754,11 +750,7 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd) mhp->attr.stag = stag; mmid = (stag) >> 8; mhp->ibmw.rkey = stag; - if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { - cxio_deallocate_window(&rhp->rdev, mhp->attr.stag); - kfree(mhp); - return ERR_PTR(-ENOMEM); - } + insert_handle(rhp, &rhp->mmidr, mhp, mmid); PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); return &(mhp->ibmw); } @@ -786,43 +778,37 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) struct iwch_mr *mhp; u32 mmid; u32 stag = 0; - int ret = 0; + int ret; php = to_iwch_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) - goto err; + return ERR_PTR(-ENOMEM); mhp->rhp = rhp; ret = iwch_alloc_pbl(mhp, pbl_depth); - if (ret) - goto err1; + if (ret) { + kfree(mhp); + return ERR_PTR(ret); + } mhp->attr.pbl_size = pbl_depth; ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid, mhp->attr.pbl_size, mhp->attr.pbl_addr); - if (ret) - goto err2; + if (ret) { + iwch_free_pbl(mhp); + kfree(mhp); + return ERR_PTR(ret); + } mhp->attr.pdid = php->pdid; mhp->attr.type = TPT_NON_SHARED_MR; mhp->attr.stag = stag; mhp->attr.state = 1; mmid = (stag) >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; - if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) - goto err3; - + insert_handle(rhp, &rhp->mmidr, mhp, mmid); PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); return &(mhp->ibmr); -err3: - cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, - mhp->attr.pbl_addr); -err2: - iwch_free_pbl(mhp); -err1: - kfree(mhp); -err: - return ERR_PTR(ret); } static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl( @@ -975,13 +961,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, spin_lock_init(&qhp->lock); init_waitqueue_head(&qhp->wait); atomic_set(&qhp->refcnt, 1); - - if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) { - cxio_destroy_qp(&rhp->rdev, &qhp->wq, - ucontext ? 
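The iwch_mem.c and iwch_provider.c hunks above drop the error checks on insert_handle(), the driver's idr-table insertion helper, so a failed insertion no longer tears down the hardware object it was supposed to publish. A minimal sketch of the check-and-undo pattern being removed; the types and helpers are hypothetical:

struct mr_obj;

static int publish_handle(struct mr_obj *mr);    /* may fail: -ENOMEM */
static void destroy_hw_stag(struct mr_obj *mr);

static int finish_mem_reg(struct mr_obj *mr)
{
        int ret = publish_handle(mr);

        if (ret)                /* undo the hardware side, or it leaks */
                destroy_hw_stag(mr);
        return ret;
}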
&ucontext->uctx : &rhp->rdev.uctx); - kfree(qhp); - return ERR_PTR(-ENOMEM); - } + insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid); if (udata) { @@ -1438,7 +1418,6 @@ int iwch_register_device(struct iwch_dev *dev) bail2: ib_unregister_device(&dev->ibdev); bail1: - kfree(dev->ibdev.iwcm); return ret; } @@ -1451,6 +1430,5 @@ void iwch_unregister_device(struct iwch_dev *dev) device_remove_file(&dev->ibdev.dev, iwch_class_attributes[i]); ib_unregister_device(&dev->ibdev); - kfree(dev->ibdev.iwcm); return; } diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c index 6e8653471941..27bbdc8e773a 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -889,7 +889,6 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp, init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); init_attr.rqe_count = iwch_rqes_posted(qhp); init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0; - init_attr.chan = qhp->ep->l2t->smt_idx; if (peer2peer) { init_attr.rtr_type = RTR_READ; if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator) diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_main.c b/trunk/drivers/infiniband/hw/ehca/ehca_main.c index 5b635aa5947e..fab18a2c74a8 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_main.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_main.c @@ -52,7 +52,7 @@ #include "ehca_tools.h" #include "hcp_if.h" -#define HCAD_VERSION "0029" +#define HCAD_VERSION "0028" MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Christoph Raisch "); @@ -64,7 +64,7 @@ static int ehca_hw_level = 0; static int ehca_poll_all_eqs = 1; int ehca_debug_level = 0; -int ehca_nr_ports = -1; +int ehca_nr_ports = 2; int ehca_use_hp_mr = 0; int ehca_port_act_time = 30; int ehca_static_rate = -1; @@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level, "Hardware level (0: autosensing (default), " "0x10..0x14: eHCA, 0x20..0x23: eHCA2)"); MODULE_PARM_DESC(nr_ports, - "number of connected ports (-1: autodetect (default), " - "1: port one only, 2: two ports)"); + "number of connected ports (-1: autodetect, 1: port one only, " + "2: two ports (default)"); MODULE_PARM_DESC(use_hp_mr, "Use high performance MRs (default: no)"); MODULE_PARM_DESC(port_act_time, diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c b/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c index 8fd88cd828fd..5a3d96f84c79 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_reqs.c @@ -786,11 +786,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) wc->slid = cqe->rlid; wc->dlid_path_bits = cqe->dlid; wc->src_qp = cqe->remote_qp_number; - /* - * HW has "Immed data present" and "GRH present" in bits 6 and 5. - * SW defines those in bits 1 and 0, so we can just shift and mask. 
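The ehca_main.c hunk above flips the nr_ports default from -1 (autodetect) back to 2 and rewords its help text. A sketch of how such an integer module parameter is wired up (declaration style abbreviated; ehca routes it through a global):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int nr_ports = -1;               /* -1: autodetect */
module_param(nr_ports, int, 0444);      /* visible, read-only in sysfs */
MODULE_PARM_DESC(nr_ports,
                 "number of connected ports (-1: autodetect, 1 or 2: fixed)");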
- */ - wc->wc_flags = (cqe->w_completion_flags >> 5) & 3; + wc->wc_flags = cqe->w_completion_flags; wc->ex.imm_data = cpu_to_be32(cqe->immediate_data); wc->sl = cqe->service_level; diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_sqp.c b/trunk/drivers/infiniband/hw/ehca/ehca_sqp.c index 8c1213f8916a..c568b28f4e20 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_sqp.c @@ -125,30 +125,14 @@ struct ib_perf { u8 data[192]; } __attribute__ ((packed)); -/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */ -struct tcslfl { - u32 tc:8; - u32 sl:4; - u32 fl:20; -} __attribute__ ((packed)); - -/* IP Version/TC/FL packed into 32 bits, as in GRH */ -struct vertcfl { - u32 ver:4; - u32 tc:8; - u32 fl:20; -} __attribute__ ((packed)); static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_perf *in_perf = (struct ib_perf *)in_mad; struct ib_perf *out_perf = (struct ib_perf *)out_mad; struct ib_class_port_info *poi = (struct ib_class_port_info *)out_perf->data; - struct tcslfl *tcslfl = - (struct tcslfl *)&poi->redirect_tcslfl; struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device); struct ehca_sport *sport = &shca->sport[port_num - 1]; @@ -174,29 +158,10 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, poi->base_version = 1; poi->class_version = 1; poi->resp_time_value = 18; - - /* copy local routing information from WC where applicable */ - tcslfl->sl = in_wc->sl; - poi->redirect_lid = - sport->saved_attr.lid | in_wc->dlid_path_bits; - poi->redirect_qp = sport->pma_qp_nr; + poi->redirect_lid = sport->saved_attr.lid; + poi->redirect_qp = sport->pma_qp_nr; poi->redirect_qkey = IB_QP1_QKEY; - - ehca_query_pkey(ibdev, port_num, in_wc->pkey_index, - &poi->redirect_pkey); - - /* if request was globally routed, copy route info */ - if (in_grh) { - struct vertcfl *vertcfl = - (struct vertcfl *)&in_grh->version_tclass_flow; - memcpy(poi->redirect_gid, in_grh->dgid.raw, - sizeof(poi->redirect_gid)); - tcslfl->tc = vertcfl->tc; - tcslfl->fl = vertcfl->fl; - } else - /* else only fill in default GID */ - ehca_query_gid(ibdev, port_num, 0, - (union ib_gid *)&poi->redirect_gid); + poi->redirect_pkey = IB_DEFAULT_PKEY_FULL; ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x", sport->saved_attr.lid, sport->pma_qp_nr); @@ -218,7 +183,8 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + struct ib_mad *in_mad, + struct ib_mad *out_mad) { int ret; @@ -230,8 +196,7 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return IB_MAD_RESULT_SUCCESS; ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp); - ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh, - in_mad, out_mad); + ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad); return ret; } diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c b/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c index 38a287006612..23173982b32c 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port, pd->port_cnt = 1; port_fp(fp) = pd; pd->port_pid = get_pid(task_pid(current)); - 
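The ehca_reqs.c hunk just above drops the completion-flag remapping: per the removed comment, the hardware reports "immediate data present" and "GRH present" in bits 6 and 5, while ib_wc expects them in bits 1 and 0, so one shift-and-mask converts the whole pair. Plain C illustration:

#include <stdio.h>

int main(void)
{
        unsigned int hw_flags = 0x60;                 /* bits 6 and 5 set */
        unsigned int wc_flags = (hw_flags >> 5) & 3;  /* now bits 1 and 0 */

        printf("wc_flags = 0x%x\n", wc_flags);        /* prints 0x3 */
        return 0;
}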
strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); + strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); ipath_stats.sps_ports++; ret = 0; } else diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_mad.c b/trunk/drivers/infiniband/hw/ipath/ipath_mad.c index ceb98ee78666..16a702d46018 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_mad.c @@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp, if (smp->attr_mod) smp->status |= IB_SMP_INVALID_FIELD; - memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); + strncpy(smp->data, ibdev->node_desc, sizeof(smp->data)); return reply(smp); } diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c index 3cb3f47a10b8..ae3d7590346e 100644 --- a/trunk/drivers/infiniband/hw/mlx4/main.c +++ b/trunk/drivers/infiniband/hw/mlx4/main.c @@ -342,9 +342,6 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev, struct mlx4_ib_alloc_ucontext_resp resp; int err; - if (!dev->ib_active) - return ERR_PTR(-EAGAIN); - resp.qp_tab_size = dev->dev->caps.num_qps; resp.bf_reg_size = dev->dev->caps.bf_reg_size; resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; @@ -543,11 +540,15 @@ static struct device_attribute *mlx4_class_attributes[] = { static void *mlx4_ib_add(struct mlx4_dev *dev) { + static int mlx4_ib_version_printed; struct mlx4_ib_dev *ibdev; int num_ports = 0; int i; - printk_once(KERN_INFO "%s", mlx4_ib_version); + if (!mlx4_ib_version_printed) { + printk(KERN_INFO "%s", mlx4_ib_version); + ++mlx4_ib_version_printed; + } mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) num_ports++; @@ -672,8 +673,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) goto err_reg; } - ibdev->ib_active = true; - return ibdev; err_reg: @@ -730,7 +729,6 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, break; case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: - ibdev->ib_active = false; ibev.event = IB_EVENT_DEVICE_FATAL; break; diff --git a/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h b/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h index 3486d7675e56..8a7dd6795fa0 100644 --- a/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -175,7 +175,6 @@ struct mlx4_ib_dev { spinlock_t sm_lock; struct mutex cap_mask_mutex; - bool ib_active; }; static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c index 219b10397b4d..c4a02648c8af 100644 --- a/trunk/drivers/infiniband/hw/mlx4/qp.c +++ b/trunk/drivers/infiniband/hw/mlx4/qp.c @@ -615,12 +615,10 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state) } static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) - __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { - if (send_cq == recv_cq) { + if (send_cq == recv_cq) spin_lock_irq(&send_cq->lock); - __acquire(&recv_cq->lock); - } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { + else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_lock_irq(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { @@ -630,12 +628,10 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv } static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) - __releases(&send_cq->lock) __releases(&recv_cq->lock) { - if (send_cq == recv_cq) { - __release(&recv_cq->lock); + if (send_cq == recv_cq) 
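The ipath hunks above swap strlcpy() back to strncpy(). The difference is termination: strlcpy() always NUL-terminates (truncating if necessary), whereas strncpy() leaves the destination unterminated whenever the source is at least as long as the buffer. A sketch contrasting the two (kernel string API; the helper is hypothetical):

#include <linux/string.h>
#include <linux/types.h>

static void copy_comm(char *dst, size_t dst_len, const char *src)
{
        /* Safe form: dst[dst_len - 1] is guaranteed to be '\0'. */
        strlcpy(dst, src, dst_len);

        /* Restored form: if strlen(src) >= dst_len, dst gets no '\0'. */
        strncpy(dst, src, dst_len);
}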
spin_unlock_irq(&send_cq->lock); - } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { + else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else { diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_catas.c b/trunk/drivers/infiniband/hw/mthca/mthca_catas.c index 056b2a4c6970..65ad359fdf16 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_catas.c @@ -88,7 +88,6 @@ static void handle_catas(struct mthca_dev *dev) event.device = &dev->ib_dev; event.event = IB_EVENT_DEVICE_FATAL; event.element.port_num = 0; - dev->active = false; ib_dispatch_event(&event); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_config_reg.h b/trunk/drivers/infiniband/hw/mthca/mthca_config_reg.h index 155bc66395be..75671f75cac4 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_config_reg.h +++ b/trunk/drivers/infiniband/hw/mthca/mthca_config_reg.h @@ -34,6 +34,8 @@ #ifndef MTHCA_CONFIG_REG_H #define MTHCA_CONFIG_REG_H +#include + #define MTHCA_HCR_BASE 0x80680 #define MTHCA_HCR_SIZE 0x0001c #define MTHCA_ECR_BASE 0x80700 diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_dev.h b/trunk/drivers/infiniband/hw/mthca/mthca_dev.h index 7e6a6d64ad4e..9ef611f6dd36 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/trunk/drivers/infiniband/hw/mthca/mthca_dev.h @@ -357,7 +357,6 @@ struct mthca_dev { struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; spinlock_t sm_lock; u8 rate[MTHCA_MAX_PORTS]; - bool active; }; #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_eq.c b/trunk/drivers/infiniband/hw/mthca/mthca_eq.c index 8c31fa36e95e..90e4e450a120 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_eq.c @@ -829,34 +829,27 @@ int mthca_init_eq_table(struct mthca_dev *dev) if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { static const char *eq_name[] = { - [MTHCA_EQ_COMP] = DRV_NAME "-comp", - [MTHCA_EQ_ASYNC] = DRV_NAME "-async", - [MTHCA_EQ_CMD] = DRV_NAME "-cmd" + [MTHCA_EQ_COMP] = DRV_NAME " (comp)", + [MTHCA_EQ_ASYNC] = DRV_NAME " (async)", + [MTHCA_EQ_CMD] = DRV_NAME " (cmd)" }; for (i = 0; i < MTHCA_NUM_EQ; ++i) { - snprintf(dev->eq_table.eq[i].irq_name, - IB_DEVICE_NAME_MAX, - "%s@pci:%s", eq_name[i], - pci_name(dev->pdev)); err = request_irq(dev->eq_table.eq[i].msi_x_vector, mthca_is_memfree(dev) ? mthca_arbel_msi_x_interrupt : mthca_tavor_msi_x_interrupt, - 0, dev->eq_table.eq[i].irq_name, - dev->eq_table.eq + i); + 0, eq_name[i], dev->eq_table.eq + i); if (err) goto err_out_cmd; dev->eq_table.eq[i].have_irq = 1; } } else { - snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX, - DRV_NAME "@pci:%s", pci_name(dev->pdev)); err = request_irq(dev->pdev->irq, mthca_is_memfree(dev) ? 
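The mlx4 lock_cqs/unlock_cqs hunks above (and the matching mthca ones below) strip sparse's lock-context annotations: __acquires()/__releases() declare a function's net locking effect, and the no-op __acquire()/__release() keep the checker's count balanced on the branch that physically takes only one of the two locks. A minimal sketch:

#include <linux/spinlock.h>

static void lock_pair(spinlock_t *a, spinlock_t *b)
        __acquires(a) __acquires(b)
{
        if (a == b) {
                spin_lock_irq(a);
                __acquire(b);   /* annotation only; no second lock taken */
        } else {
                spin_lock_irq(a);
                spin_lock_nested(b, SINGLE_DEPTH_NESTING);
        }
}

Note that the drivers additionally order the two CQ locks by CQ number so concurrent callers cannot deadlock; that logic is unchanged by the revert.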
mthca_arbel_interrupt : mthca_tavor_interrupt, - IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev); + IRQF_SHARED, DRV_NAME, dev); if (err) goto err_out_cmd; dev->eq_table.have_irq = 1; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_main.c b/trunk/drivers/infiniband/hw/mthca/mthca_main.c index b01b28987874..13da9f1d24c0 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_main.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_main.c @@ -1116,8 +1116,6 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type) pci_set_drvdata(pdev, mdev); mdev->hca_type = hca_type; - mdev->active = true; - return 0; err_unregister: @@ -1217,11 +1215,15 @@ int __mthca_restart_one(struct pci_dev *pdev) static int __devinit mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { + static int mthca_version_printed = 0; int ret; mutex_lock(&mthca_device_mutex); - printk_once(KERN_INFO "%s", mthca_version); + if (!mthca_version_printed) { + printk(KERN_INFO "%s", mthca_version); + ++mthca_version_printed; + } if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { printk(KERN_ERR PFX "%s has invalid driver data %lx\n", diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_provider.c b/trunk/drivers/infiniband/hw/mthca/mthca_provider.c index bcf7a4014820..87ad889e367b 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_provider.c @@ -334,9 +334,6 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, struct mthca_ucontext *context; int err; - if (!(to_mdev(ibdev)->active)) - return ERR_PTR(-EAGAIN); - memset(&uresp, 0, sizeof uresp); uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_provider.h b/trunk/drivers/infiniband/hw/mthca/mthca_provider.h index 90f4c4d2e983..c621f8794b88 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/trunk/drivers/infiniband/hw/mthca/mthca_provider.h @@ -113,7 +113,6 @@ struct mthca_eq { int nent; struct mthca_buf_list *page_list; struct mthca_mr mr; - char irq_name[IB_DEVICE_NAME_MAX]; }; struct mthca_av; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c index c10576fa60c1..f5081bfde6db 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1319,12 +1319,10 @@ int mthca_alloc_qp(struct mthca_dev *dev, } static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) - __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { - if (send_cq == recv_cq) { + if (send_cq == recv_cq) spin_lock_irq(&send_cq->lock); - __acquire(&recv_cq->lock); - } else if (send_cq->cqn < recv_cq->cqn) { + else if (send_cq->cqn < recv_cq->cqn) { spin_lock_irq(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else { @@ -1334,12 +1332,10 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) } static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) - __releases(&send_cq->lock) __releases(&recv_cq->lock) { - if (send_cq == recv_cq) { - __release(&recv_cq->lock); + if (send_cq == recv_cq) spin_unlock_irq(&send_cq->lock); - } else if (send_cq->cqn < recv_cq->cqn) { + else if (send_cq->cqn < recv_cq->cqn) { spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else { diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_reset.c b/trunk/drivers/infiniband/hw/mthca/mthca_reset.c index 2a13a163d337..acb6817f6060 100644 --- 
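The iwch, mlx4 and mthca hunks in this revert all open-code what printk_once() provides: a static guard so a banner prints a single time. Sketch of the two equivalent forms:

#include <linux/printk.h>

static void print_banner(void)
{
        /* The static guard lives inside the macro expansion. */
        printk_once(KERN_INFO "example driver version %s\n", "1.0");
}

static void print_banner_open_coded(void)
{
        static int printed;

        if (!printed++)
                printk(KERN_INFO "example driver version %s\n", "1.0");
}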
a/trunk/drivers/infiniband/hw/mthca/mthca_reset.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_reset.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include #include #include #include diff --git a/trunk/drivers/infiniband/hw/nes/nes.h b/trunk/drivers/infiniband/hw/nes/nes.h index bcc6abc4faff..bf1720f7f35f 100644 --- a/trunk/drivers/infiniband/hw/nes/nes.h +++ b/trunk/drivers/infiniband/hw/nes/nes.h @@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *); void nes_cm_disconn_worker(void *); /* nes_verbs.c */ -int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32); +int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32); int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); struct nes_ib_device *nes_init_ofa_device(struct net_device *); void nes_destroy_ofa_device(struct nes_ib_device *); diff --git a/trunk/drivers/infiniband/hw/nes/nes_cm.c b/trunk/drivers/infiniband/hw/nes/nes_cm.c index 73473db19863..114b802771ad 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_cm.c +++ b/trunk/drivers/infiniband/hw/nes/nes_cm.c @@ -2450,16 +2450,19 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod */ int nes_cm_disconn(struct nes_qp *nesqp) { - struct disconn_work *work; + unsigned long flags; - work = kzalloc(sizeof *work, GFP_ATOMIC); - if (!work) - return -ENOMEM; /* Timer will clean up */ + spin_lock_irqsave(&nesqp->lock, flags); + if (nesqp->disconn_pending == 0) { + nesqp->disconn_pending++; + spin_unlock_irqrestore(&nesqp->lock, flags); + /* init our disconnect work element, to */ + INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker); + + queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work); + } else + spin_unlock_irqrestore(&nesqp->lock, flags); - nes_add_ref(&nesqp->ibqp); - work->nesqp = nesqp; - INIT_WORK(&work->work, nes_disconnect_worker); - queue_work(g_cm_core->disconn_wq, &work->work); return 0; } @@ -2469,14 +2472,11 @@ int nes_cm_disconn(struct nes_qp *nesqp) */ static void nes_disconnect_worker(struct work_struct *work) { - struct disconn_work *dwork = container_of(work, struct disconn_work, work); - struct nes_qp *nesqp = dwork->nesqp; + struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work); - kfree(dwork); nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", nesqp->last_aeq, nesqp->hwqp.qp_id); nes_cm_disconn_true(nesqp); - nes_rem_ref(&nesqp->ibqp); } @@ -2493,12 +2493,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) u16 last_ae; u8 original_hw_tcp_state; u8 original_ibqp_state; - enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK; - int issue_disconn = 0; - int issue_close = 0; - int issue_flush = 0; - u32 flush_q = NES_CQP_FLUSH_RQ; - struct ib_event ibevent; + u8 issued_disconnect_reset = 0; if (!nesqp) { nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n"); @@ -2522,55 +2517,24 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) original_ibqp_state = nesqp->ibqp_state; last_ae = nesqp->last_aeq; - if (nesqp->term_flags) { - issue_disconn = 1; - issue_close = 1; - nesqp->cm_id = NULL; - if (nesqp->flush_issued == 0) { - nesqp->flush_issued = 1; - issue_flush = 1; - } - } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || - ((original_ibqp_state == IB_QPS_RTS) && - (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { - issue_disconn = 1; - if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) - disconn_status = IW_CM_EVENT_STATUS_RESET; - } - - if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || - 
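The nes_cm.c hunk above converts between two work-item styles: the newer code kzalloc()s a small struct disconn_work per event and takes a QP reference for its lifetime, while the restored code embeds a single work_struct in the QP and serializes it with a disconn_pending flag so it is queued at most once. A sketch of the embedded form; the types are illustrative, not the driver's:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_qp {
        struct work_struct disconn_work;
};

static void disconn_worker(struct work_struct *work)
{
        struct example_qp *qp =
                container_of(work, struct example_qp, disconn_work);

        /* ... tear down the connection belonging to qp ... */
        (void)qp;
}

static void schedule_disconn(struct example_qp *qp)
{
        INIT_WORK(&qp->disconn_work, disconn_worker);
        schedule_work(&qp->disconn_work);
}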
(original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) || - (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) || - (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { - issue_close = 1; - nesqp->cm_id = NULL; - if (nesqp->flush_issued == 0) { - nesqp->flush_issued = 1; - issue_flush = 1; - } - } - - spin_unlock_irqrestore(&nesqp->lock, flags); - if ((issue_flush) && (nesqp->destroyed == 0)) { - /* Flush the queue(s) */ - if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE) - flush_q |= NES_CQP_FLUSH_SQ; - flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1); + nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state); - if (nesqp->term_flags) { - ibevent.device = nesqp->ibqp.device; - ibevent.event = nesqp->terminate_eventtype; - ibevent.element.qp = &nesqp->ibqp; - nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); - } - } - - if ((cm_id) && (cm_id->event_handler)) { - if (issue_disconn) { + if ((nesqp->cm_id) && (cm_id->event_handler)) { + if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || + ((original_ibqp_state == IB_QPS_RTS) && + (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { atomic_inc(&cm_disconnects); cm_event.event = IW_CM_EVENT_DISCONNECT; - cm_event.status = disconn_status; + if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { + cm_event.status = IW_CM_EVENT_STATUS_RESET; + nes_debug(NES_DBG_CM, "Generating a CM " + "Disconnect Event (status reset) for " + "QP%u, cm_id = %p. \n", + nesqp->hwqp.qp_id, cm_id); + } else + cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; cm_event.private_data = NULL; @@ -2583,14 +2547,29 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) nesqp->hwqp.sq_tail, cm_id, atomic_read(&nesqp->refcount)); + spin_unlock_irqrestore(&nesqp->lock, flags); ret = cm_id->event_handler(cm_id, &cm_event); if (ret) nes_debug(NES_DBG_CM, "OFA CM event_handler " "returned, ret=%d\n", ret); + spin_lock_irqsave(&nesqp->lock, flags); } - if (issue_close) { + nesqp->disconn_pending = 0; + /* There might have been another AE while the lock was released */ + original_hw_tcp_state = nesqp->hw_tcp_state; + original_ibqp_state = nesqp->ibqp_state; + last_ae = nesqp->last_aeq; + + if ((issued_disconnect_reset == 0) && (nesqp->cm_id) && + ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || + (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) || + (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) || + (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { atomic_inc(&cm_closes); + nesqp->cm_id = NULL; + nesqp->in_disconnect = 0; + spin_unlock_irqrestore(&nesqp->lock, flags); nes_disconnect(nesqp, 1); cm_id->provider_data = nesqp; @@ -2609,7 +2588,28 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) } cm_id->rem_ref(cm_id); + + spin_lock_irqsave(&nesqp->lock, flags); + if (nesqp->flush_issued == 0) { + nesqp->flush_issued = 1; + spin_unlock_irqrestore(&nesqp->lock, flags); + flush_wqes(nesvnic->nesdev, nesqp, + NES_CQP_FLUSH_RQ, 1); + } else + spin_unlock_irqrestore(&nesqp->lock, flags); + } else { + cm_id = nesqp->cm_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + /* check to see if the inbound reset beat the outbound reset */ + if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) { + nes_debug(NES_DBG_CM, "QP%u: Decing refcount " + "due to inbound reset beating the " + "outbound reset.\n", nesqp->hwqp.qp_id); + } } + } else { + nesqp->disconn_pending = 0; + spin_unlock_irqrestore(&nesqp->lock, flags); } return 0; diff --git 
a/trunk/drivers/infiniband/hw/nes/nes_cm.h b/trunk/drivers/infiniband/hw/nes/nes_cm.h index 90e8e4d8a5ce..8b7e7c0e496e 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_cm.h +++ b/trunk/drivers/infiniband/hw/nes/nes_cm.h @@ -410,6 +410,8 @@ struct nes_cm_ops { int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *, enum nes_timer_type, int, int); +int nes_cm_disconn(struct nes_qp *); + int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *); int nes_reject(struct iw_cm_id *, const void *, u8); int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *); diff --git a/trunk/drivers/infiniband/hw/nes/nes_hw.c b/trunk/drivers/infiniband/hw/nes/nes_hw.c index 63a1a8e1e8a3..4a84d02ece06 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_hw.c +++ b/trunk/drivers/infiniband/hw/nes/nes_hw.c @@ -74,8 +74,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, static void process_critical_error(struct nes_device *nesdev); static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); -static void nes_terminate_timeout(unsigned long context); -static void nes_terminate_start_timer(struct nes_qp *nesqp); #ifdef CONFIG_INFINIBAND_NES_DEBUG static unsigned char *nes_iwarp_state_str[] = { @@ -2905,417 +2903,6 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) } -static u8 *locate_mpa(u8 *pkt, u32 aeq_info) -{ - u16 pkt_len; - - if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) { - /* skip over ethernet header */ - pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2)); - pkt += ETH_HLEN; - - /* Skip over IP and TCP headers */ - pkt += 4 * (pkt[0] & 0x0f); - pkt += 4 * ((pkt[12] >> 4) & 0x0f); - } - return pkt; -} - -/* Determine if incoming error pkt is rdma layer */ -static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info) -{ - u8 *pkt; - u16 *mpa; - u32 opcode = 0xffffffff; - - if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { - pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; - mpa = (u16 *)locate_mpa(pkt, aeq_info); - opcode = be16_to_cpu(mpa[1]) & 0xf; - } - - return opcode; -} - -/* Build iWARP terminate header */ -static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info) -{ - u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; - u16 ddp_seg_len; - int copy_len = 0; - u8 is_tagged = 0; - u8 flush_code = 0; - struct nes_terminate_hdr *termhdr; - - termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase; - memset(termhdr, 0, 64); - - if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { - - /* Use data from offending packet to fill in ddp & rdma hdrs */ - pkt = locate_mpa(pkt, aeq_info); - ddp_seg_len = be16_to_cpu(*(u16 *)pkt); - if (ddp_seg_len) { - copy_len = 2; - termhdr->hdrct = DDP_LEN_FLAG; - if (pkt[2] & 0x80) { - is_tagged = 1; - if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { - copy_len += TERM_DDP_LEN_TAGGED; - termhdr->hdrct |= DDP_HDR_FLAG; - } - } else { - if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { - copy_len += TERM_DDP_LEN_UNTAGGED; - termhdr->hdrct |= DDP_HDR_FLAG; - } - - if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) { - if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) { - copy_len += TERM_RDMA_LEN; - termhdr->hdrct |= RDMA_HDR_FLAG; - } - } - } - } - } - - switch (async_event_id) { - case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: - switch (iwarp_opcode(nesqp, aeq_info)) { - case IWARP_OPCODE_WRITE: - flush_code = IB_WC_LOC_PROT_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; - 
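The removed locate_mpa() above skips variable-length headers using the packet's own length fields: after the Ethernet header, the IPv4 header length is the low nibble of byte 0 (counted in 32-bit words), and the TCP data offset is the high nibble of byte 12 of the TCP header. Plain C illustration, assuming pkt already points at the IP header:

#include <stdint.h>

static const uint8_t *skip_ip_tcp(const uint8_t *pkt)
{
        pkt += 4 * (pkt[0] & 0x0f);          /* IPv4 IHL, in words */
        pkt += 4 * ((pkt[12] >> 4) & 0x0f);  /* TCP data offset, in words */
        return pkt;                          /* start of the payload */
}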
termhdr->error_code = DDP_TAGGED_INV_STAG; - break; - default: - flush_code = IB_WC_REM_ACCESS_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; - termhdr->error_code = RDMAP_INV_STAG; - } - break; - case NES_AEQE_AEID_AMP_INVALID_STAG: - flush_code = IB_WC_REM_ACCESS_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; - termhdr->error_code = RDMAP_INV_STAG; - break; - case NES_AEQE_AEID_AMP_BAD_QP: - flush_code = IB_WC_LOC_QP_OP_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; - termhdr->error_code = DDP_UNTAGGED_INV_QN; - break; - case NES_AEQE_AEID_AMP_BAD_STAG_KEY: - case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: - switch (iwarp_opcode(nesqp, aeq_info)) { - case IWARP_OPCODE_SEND_INV: - case IWARP_OPCODE_SEND_SE_INV: - flush_code = IB_WC_REM_OP_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; - termhdr->error_code = RDMAP_CANT_INV_STAG; - break; - default: - flush_code = IB_WC_REM_ACCESS_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; - termhdr->error_code = RDMAP_INV_STAG; - } - break; - case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: - if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) { - flush_code = IB_WC_LOC_PROT_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; - termhdr->error_code = DDP_TAGGED_BOUNDS; - } else { - flush_code = IB_WC_REM_ACCESS_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; - termhdr->error_code = RDMAP_INV_BOUNDS; - } - break; - case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION: - case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: - case NES_AEQE_AEID_PRIV_OPERATION_DENIED: - flush_code = IB_WC_REM_ACCESS_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; - termhdr->error_code = RDMAP_ACCESS; - break; - case NES_AEQE_AEID_AMP_TO_WRAP: - flush_code = IB_WC_REM_ACCESS_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; - termhdr->error_code = RDMAP_TO_WRAP; - break; - case NES_AEQE_AEID_AMP_BAD_PD: - switch (iwarp_opcode(nesqp, aeq_info)) { - case IWARP_OPCODE_WRITE: - flush_code = IB_WC_LOC_PROT_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; - termhdr->error_code = DDP_TAGGED_UNASSOC_STAG; - break; - case IWARP_OPCODE_SEND_INV: - case IWARP_OPCODE_SEND_SE_INV: - flush_code = IB_WC_REM_ACCESS_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; - termhdr->error_code = RDMAP_CANT_INV_STAG; - break; - default: - flush_code = IB_WC_REM_ACCESS_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT; - termhdr->error_code = RDMAP_UNASSOC_STAG; - } - break; - case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH: - flush_code = IB_WC_LOC_LEN_ERR; - termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP; - termhdr->error_code = MPA_MARKER; - break; - case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: - flush_code = IB_WC_GENERAL_ERR; - termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP; - termhdr->error_code = MPA_CRC; - break; - case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE: - case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: - flush_code = IB_WC_LOC_LEN_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC; - termhdr->error_code = DDP_CATASTROPHIC_LOCAL; - break; - case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC: - case NES_AEQE_AEID_DDP_NO_L_BIT: - flush_code = IB_WC_FATAL_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC; - termhdr->error_code = DDP_CATASTROPHIC_LOCAL; - break; - case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN: - case 
NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID: - flush_code = IB_WC_GENERAL_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; - termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE; - break; - case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: - flush_code = IB_WC_LOC_LEN_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; - termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG; - break; - case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION: - flush_code = IB_WC_GENERAL_ERR; - if (is_tagged) { - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER; - termhdr->error_code = DDP_TAGGED_INV_DDP_VER; - } else { - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; - termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER; - } - break; - case NES_AEQE_AEID_DDP_UBE_INVALID_MO: - flush_code = IB_WC_GENERAL_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; - termhdr->error_code = DDP_UNTAGGED_INV_MO; - break; - case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: - flush_code = IB_WC_REM_OP_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; - termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF; - break; - case NES_AEQE_AEID_DDP_UBE_INVALID_QN: - flush_code = IB_WC_GENERAL_ERR; - termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER; - termhdr->error_code = DDP_UNTAGGED_INV_QN; - break; - case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION: - flush_code = IB_WC_GENERAL_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; - termhdr->error_code = RDMAP_INV_RDMAP_VER; - break; - case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE: - flush_code = IB_WC_LOC_QP_OP_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; - termhdr->error_code = RDMAP_UNEXPECTED_OP; - break; - default: - flush_code = IB_WC_FATAL_ERR; - termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP; - termhdr->error_code = RDMAP_UNSPECIFIED; - break; - } - - if (copy_len) - memcpy(termhdr + 1, pkt, copy_len); - - if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) { - if (aeq_info & NES_AEQE_SQ) - nesqp->term_sq_flush_code = flush_code; - else - nesqp->term_rq_flush_code = flush_code; - } - - return sizeof(struct nes_terminate_hdr) + copy_len; -} - -static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp, - struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype) -{ - u64 context; - unsigned long flags; - u32 aeq_info; - u16 async_event_id; - u8 tcp_state; - u8 iwarp_state; - u32 termlen = 0; - u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE | - NES_CQP_QP_TERM_DONT_SEND_FIN; - struct nes_adapter *nesadapter = nesdev->nesadapter; - - if (nesqp->term_flags & NES_TERM_SENT) - return; /* Sanity check */ - - aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); - tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; - iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; - async_event_id = (u16)aeq_info; - - context = (unsigned long)nesadapter->qp_table[le32_to_cpu( - aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; - if (!context) { - WARN_ON(!context); - return; - } - - nesqp = (struct nes_qp *)(unsigned long)context; - spin_lock_irqsave(&nesqp->lock, flags); - nesqp->hw_iwarp_state = iwarp_state; - nesqp->hw_tcp_state = tcp_state; - nesqp->last_aeq = async_event_id; - nesqp->terminate_eventtype = eventtype; - spin_unlock_irqrestore(&nesqp->lock, flags); - - if (nesadapter->send_term_ok) - 
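Throughout the removed nes_bld_terminate_hdr(), the terminate header's layer_etype byte packs the reporting protocol layer into the high nibble and the error type into the low nibble, e.g. (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT. A small illustration; the enum values here are placeholders, not the driver's:

#include <stdint.h>

enum { EX_LAYER_RDMA = 0x0, EX_LAYER_DDP = 0x1, EX_LAYER_MPA = 0x2 };

static uint8_t pack_layer_etype(uint8_t layer, uint8_t etype)
{
        return (uint8_t)((layer << 4) | (etype & 0x0f));
}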
termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info); - else - mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG; - - nes_terminate_start_timer(nesqp); - nesqp->term_flags |= NES_TERM_SENT; - nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0); -} - -static void nes_terminate_send_fin(struct nes_device *nesdev, - struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe) -{ - u32 aeq_info; - u16 async_event_id; - u8 tcp_state; - u8 iwarp_state; - unsigned long flags; - - aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); - tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; - iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; - async_event_id = (u16)aeq_info; - - spin_lock_irqsave(&nesqp->lock, flags); - nesqp->hw_iwarp_state = iwarp_state; - nesqp->hw_tcp_state = tcp_state; - nesqp->last_aeq = async_event_id; - spin_unlock_irqrestore(&nesqp->lock, flags); - - /* Send the fin only */ - nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE | - NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0); -} - -/* Cleanup after a terminate sent or received */ -static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred) -{ - u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; - unsigned long flags; - struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device); - struct nes_device *nesdev = nesvnic->nesdev; - u8 first_time = 0; - - spin_lock_irqsave(&nesqp->lock, flags); - if (nesqp->hte_added) { - nesqp->hte_added = 0; - next_iwarp_state |= NES_CQP_QP_DEL_HTE; - } - - first_time = (nesqp->term_flags & NES_TERM_DONE) == 0; - nesqp->term_flags |= NES_TERM_DONE; - spin_unlock_irqrestore(&nesqp->lock, flags); - - /* Make sure we go through this only once */ - if (first_time) { - if (timeout_occurred == 0) - del_timer(&nesqp->terminate_timer); - else - next_iwarp_state |= NES_CQP_QP_RESET; - - nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); - nes_cm_disconn(nesqp); - } -} - -static void nes_terminate_received(struct nes_device *nesdev, - struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe) -{ - u32 aeq_info; - u8 *pkt; - u32 *mpa; - u8 ddp_ctl; - u8 rdma_ctl; - u16 aeq_id = 0; - - aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); - if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) { - /* Terminate is not a performance path so the silicon */ - /* did not validate the frame - do it now */ - pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET; - mpa = (u32 *)locate_mpa(pkt, aeq_info); - ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff; - rdma_ctl = be32_to_cpu(mpa[0]) & 0xff; - if ((ddp_ctl & 0xc0) != 0x40) - aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC; - else if ((ddp_ctl & 0x03) != 1) - aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION; - else if (be32_to_cpu(mpa[2]) != 2) - aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN; - else if (be32_to_cpu(mpa[3]) != 1) - aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN; - else if (be32_to_cpu(mpa[4]) != 0) - aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO; - else if ((rdma_ctl & 0xc0) != 0x40) - aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION; - - if (aeq_id) { - /* Bad terminate recvd - send back a terminate */ - aeq_info = (aeq_info & 0xffff0000) | aeq_id; - aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info); - nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); - return; - } - } - - nesqp->term_flags |= NES_TERM_RCVD; - nesqp->terminate_eventtype = IB_EVENT_QP_FATAL; - nes_terminate_start_timer(nesqp); - nes_terminate_send_fin(nesdev, nesqp, aeqe); -} - 
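The two watchdog helpers removed just below, nes_terminate_timeout() and nes_terminate_start_timer(), armed a one-second timer before the terminate exchange; nes_terminate_done() then ran exactly once, either from the close-complete AE path (timeout_occurred == 0, timer deleted) or from the timer callback (timeout_occurred == 1, which adds NES_CQP_QP_RESET to the modify). For reference, a minimal standalone sketch of that one-shot watchdog pattern, written against the pre-timer_setup() timer API of this kernel generation; all names below are illustrative, not the driver's, and ctx->lock is assumed initialized with spin_lock_init():

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct term_ctx {
	struct timer_list timer;
	spinlock_t lock;
	int done;			/* plays the role of NES_TERM_DONE */
};

/* Runs once, from whichever of the two paths gets there first. */
static void term_finish(struct term_ctx *ctx, int timed_out)
{
	unsigned long flags;
	int first_time;

	spin_lock_irqsave(&ctx->lock, flags);
	first_time = !ctx->done;
	ctx->done = 1;
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (!first_time)
		return;			/* the other path already cleaned up */
	if (!timed_out)
		del_timer(&ctx->timer);	/* normal completion beat the watchdog */
	/* else: escalate, e.g. force the connection to error/reset here */
}

static void term_timeout(unsigned long data)
{
	term_finish((struct term_ctx *)data, 1);
}

/* Arm the watchdog just before issuing the terminate. */
static void term_start(struct term_ctx *ctx)
{
	init_timer(&ctx->timer);
	ctx->timer.function = term_timeout;
	ctx->timer.data = (unsigned long)ctx;
	ctx->timer.expires = jiffies + HZ;	/* one second, as in the driver */
	add_timer(&ctx->timer);
}

The flag taken under the lock is what lets the close-complete path and the timer path race safely: whichever runs second sees done already set and returns without touching the hardware again.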
-/* Timeout routine in case terminate fails to complete */ -static void nes_terminate_timeout(unsigned long context) -{ - struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context; - - nes_terminate_done(nesqp, 1); -} - -/* Set a timer in case hw cannot complete the terminate sequence */ -static void nes_terminate_start_timer(struct nes_qp *nesqp) -{ - init_timer(&nesqp->terminate_timer); - nesqp->terminate_timer.function = nes_terminate_timeout; - nesqp->terminate_timer.expires = jiffies + HZ; - nesqp->terminate_timer.data = (unsigned long)nesqp; - add_timer(&nesqp->terminate_timer); -} - /** * nes_process_iwarp_aeqe */ @@ -3323,27 +2910,28 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe) { u64 context; + u64 aeqe_context = 0; unsigned long flags; struct nes_qp *nesqp; - struct nes_hw_cq *hw_cq; - struct nes_cq *nescq; int resource_allocated; + /* struct iw_cm_id *cm_id; */ struct nes_adapter *nesadapter = nesdev->nesadapter; + struct ib_event ibevent; + /* struct iw_cm_event cm_event; */ u32 aeq_info; u32 next_iwarp_state = 0; u16 async_event_id; u8 tcp_state; u8 iwarp_state; - int must_disconn = 1; - int must_terminate = 0; - struct ib_event ibevent; nes_debug(NES_DBG_AEQ, "\n"); aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); - if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) { + if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) { context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; } else { + aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); + aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; context = (unsigned long)nesadapter->qp_table[le32_to_cpu( aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; BUG_ON(!context); @@ -3360,11 +2948,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, switch (async_event_id) { case NES_AEQE_AEID_LLP_FIN_RECEIVED: - nesqp = (struct nes_qp *)(unsigned long)context; - - if (nesqp->term_flags) - return; /* Ignore it, wait for close complete */ - + nesqp = *((struct nes_qp **)&context); if (atomic_inc_return(&nesqp->close_timer_started) == 1) { nesqp->cm_id->add_ref(nesqp->cm_id); schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, @@ -3375,24 +2959,18 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), async_event_id, nesqp->last_aeq, tcp_state); } - if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || (nesqp->ibqp_state != IB_QPS_RTS)) { /* FIN Received but tcp state or IB state moved on, should expect a close complete */ return; } - case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: - nesqp = (struct nes_qp *)(unsigned long)context; - if (nesqp->term_flags) { - nes_terminate_done(nesqp, 0); - return; - } - case NES_AEQE_AEID_LLP_CONNECTION_RESET: + case NES_AEQE_AEID_TERMINATE_SENT: + case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE: case NES_AEQE_AEID_RESET_SENT: - nesqp = (struct nes_qp *)(unsigned long)context; + nesqp = *((struct nes_qp **)&context); if (async_event_id == NES_AEQE_AEID_RESET_SENT) { tcp_state = NES_AEQE_TCP_STATE_CLOSED; } @@ -3404,7 +2982,12 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) || (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) { nesqp->hte_added = 0; - next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE; + 
spin_unlock_irqrestore(&nesqp->lock, flags); + nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n", + nesqp->hwqp.qp_id); + nes_hw_modify_qp(nesdev, nesqp, + NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0); + spin_lock_irqsave(&nesqp->lock, flags); } if ((nesqp->ibqp_state == IB_QPS_RTS) && @@ -3416,106 +2999,151 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; break; case NES_AEQE_IWARP_STATE_TERMINATE: - must_disconn = 0; /* terminate path takes care of disconn */ - if (nesqp->term_flags == 0) - must_terminate = 1; + next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; + nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE; + if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { + next_iwarp_state |= 0x02000000; + nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; + } break; + default: + next_iwarp_state = 0; + } + spin_unlock_irqrestore(&nesqp->lock, flags); + if (next_iwarp_state) { + nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X," + " also added another reference\n", + nesqp->hwqp.qp_id, next_iwarp_state); + nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); } + nes_cm_disconn(nesqp); } else { if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) { /* FIN Received but ib state not RTS, close complete will be on its way */ - must_disconn = 0; + spin_unlock_irqrestore(&nesqp->lock, flags); + return; } - } - spin_unlock_irqrestore(&nesqp->lock, flags); - - if (must_terminate) - nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); - else if (must_disconn) { - if (next_iwarp_state) { - nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n", - nesqp->hwqp.qp_id, next_iwarp_state); - nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); + spin_unlock_irqrestore(&nesqp->lock, flags); + if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { + next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000; + nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; + nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. 
next state = 0x%08X," + " also added another reference\n", + nesqp->hwqp.qp_id, next_iwarp_state); + nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); } nes_cm_disconn(nesqp); } break; - - case NES_AEQE_AEID_TERMINATE_SENT: - nesqp = (struct nes_qp *)(unsigned long)context; - nes_terminate_send_fin(nesdev, nesqp, aeqe); - break; - case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED: - nesqp = (struct nes_qp *)(unsigned long)context; - nes_terminate_received(nesdev, nesqp, aeqe); + nesqp = *((struct nes_qp **)&context); + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED" + " event on QP%u \n Q2 Data:\n", + nesqp->hwqp.qp_id); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_FATAL; + nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); + } + if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || + ((nesqp->ibqp_state == IB_QPS_RTS)&& + (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { + nes_cm_disconn(nesqp); + } else { + nesqp->in_disconnect = 0; + wake_up(&nesqp->kick_waitq); + } + break; + case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: + nesqp = *((struct nes_qp **)&context); + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR; + nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; + nesqp->last_aeq = async_event_id; + if (nesqp->cm_id) { + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES" + " event on QP%u, remote IP = 0x%08X \n", + nesqp->hwqp.qp_id, + ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr)); + } else { + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES" + " event on QP%u \n", + nesqp->hwqp.qp_id); + } + spin_unlock_irqrestore(&nesqp->lock, flags); + next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET; + nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_FATAL; + nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); + } break; - - case NES_AEQE_AEID_AMP_BAD_STAG_KEY: case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: + if (NES_AEQE_INBOUND_RDMA&aeq_info) { + nesqp = nesadapter->qp_table[le32_to_cpu( + aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; + } else { + /* TODO: get the actual WQE and mask off wqe index */ + context &= ~((u64)511); + nesqp = *((struct nes_qp **)&context); + } + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n", + nesqp->hwqp.qp_id); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_ACCESS_ERR; + nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); + } + break; case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: - case NES_AEQE_AEID_AMP_INVALID_STAG: - case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION: - case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: - case NES_AEQE_AEID_PRIV_OPERATION_DENIED: - case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: - 
case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: - case NES_AEQE_AEID_AMP_TO_WRAP: - nesqp = (struct nes_qp *)(unsigned long)context; - nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR); + nesqp = *((struct nes_qp **)&context); + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n", + nesqp->hwqp.qp_id); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_ACCESS_ERR; + nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); + } break; - - case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE: - case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: - case NES_AEQE_AEID_DDP_UBE_INVALID_MO: - case NES_AEQE_AEID_DDP_UBE_INVALID_QN: - nesqp = (struct nes_qp *)(unsigned long)context; - if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) { - aeq_info &= 0xffff0000; - aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE; - aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info); + case NES_AEQE_AEID_PRIV_OPERATION_DENIED: + nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words + [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u," + " nesqp = %p, AE reported %p\n", + nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context)); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_ACCESS_ERR; + nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); } - - case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE: - case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: - case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: - case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: - case NES_AEQE_AEID_AMP_BAD_QP: - case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH: - case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC: - case NES_AEQE_AEID_DDP_NO_L_BIT: - case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN: - case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID: - case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION: - case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION: - case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE: - case NES_AEQE_AEID_AMP_BAD_PD: - case NES_AEQE_AEID_AMP_FASTREG_SHARED: - case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG: - case NES_AEQE_AEID_AMP_FASTREG_MW_STAG: - case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS: - case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW: - case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH: - case NES_AEQE_AEID_AMP_INVALIDATE_SHARED: - case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS: - case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG: - case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG: - case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG: - case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG: - case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS: - case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS: - case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT: - case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED: - case NES_AEQE_AEID_BAD_CLOSE: - case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO: - case NES_AEQE_AEID_STAG_ZERO_INVALID: - case 
NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST: - case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: - nesqp = (struct nes_qp *)(unsigned long)context; - nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); break; - case NES_AEQE_AEID_CQ_OPERATION_ERROR: context <<= 1; nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n", @@ -3525,19 +3153,83 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, if (resource_allocated) { printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n", __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); - hw_cq = (struct nes_hw_cq *)(unsigned long)context; - if (hw_cq) { - nescq = container_of(hw_cq, struct nes_cq, hw_cq); - if (nescq->ibcq.event_handler) { - ibevent.device = nescq->ibcq.device; - ibevent.event = IB_EVENT_CQ_ERR; - ibevent.element.cq = &nescq->ibcq; - nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context); - } - } } break; - + case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: + nesqp = nesadapter->qp_table[le32_to_cpu( + aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG" + "_FOR_AVAILABLE_BUFFER event on QP%u\n", + nesqp->hwqp.qp_id); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_ACCESS_ERR; + nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); + } + /* tell cm to disconnect, cm will queue work to thread */ + nes_cm_disconn(nesqp); + break; + case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: + nesqp = *((struct nes_qp **)&context); + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN" + "_NO_BUFFER_AVAILABLE event on QP%u\n", + nesqp->hwqp.qp_id); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_FATAL; + nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); + } + /* tell cm to disconnect, cm will queue work to thread */ + nes_cm_disconn(nesqp); + break; + case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: + nesqp = *((struct nes_qp **)&context); + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR" + " event on QP%u \n Q2 Data:\n", + nesqp->hwqp.qp_id); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_FATAL; + nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); + } + /* tell cm to disconnect, cm will queue work to thread */ + nes_cm_disconn(nesqp); + break; + /* TODO: additional AEs need to be here */ + case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: + nesqp = *((struct nes_qp **)&context); + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + 
nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + if (nesqp->ibqp.event_handler) { + ibevent.device = nesqp->ibqp.device; + ibevent.element.qp = &nesqp->ibqp; + ibevent.event = IB_EVENT_QP_ACCESS_ERR; + nesqp->ibqp.event_handler(&ibevent, + nesqp->ibqp.qp_context); + } + nes_cm_disconn(nesqp); + break; default: nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", async_event_id); @@ -3546,6 +3238,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, } + /** * nes_iwarp_ce_handler */ @@ -3680,8 +3373,6 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp, { struct nes_cqp_request *cqp_request; struct nes_hw_cqp_wqe *cqp_wqe; - u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH; - u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH; int ret; cqp_request = nes_get_cqp_request(nesdev); @@ -3698,24 +3389,6 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp, cqp_wqe = &cqp_request->cqp_wqe; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); - /* If wqe in error was identified, set code to be put into cqe */ - if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) { - which_wq |= NES_CQP_FLUSH_MAJ_MIN; - sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code; - nesqp->term_sq_flush_code = 0; - } - - if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) { - which_wq |= NES_CQP_FLUSH_MAJ_MIN; - rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code; - nesqp->term_rq_flush_code = 0; - } - - if (which_wq & NES_CQP_FLUSH_MAJ_MIN) { - cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code); - cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code); - } - - cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq); cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id); diff --git a/trunk/drivers/infiniband/hw/nes/nes_hw.h b/trunk/drivers/infiniband/hw/nes/nes_hw.h index f28a41ba9fa1..c3654c6383fe 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_hw.h +++ b/trunk/drivers/infiniband/hw/nes/nes_hw.h @@ -241,7 +241,6 @@ enum nes_cqp_stag_wqeword_idx { }; #define NES_CQP_OP_IWARP_STATE_SHIFT 28 -#define NES_CQP_OP_TERMLEN_SHIFT 28 enum nes_cqp_qp_bits { NES_CQP_QP_ARP_VALID = (1<<8), @@ -266,16 +265,12 @@ enum nes_cqp_qp_bits { NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<28), ... if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) { nesadapter->virtwq = 1; } - if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3)) - nesadapter->send_term_ok = 1; - nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + (u32)((u8)eeprom_data); @@ -551,7 +548,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev) spin_unlock_irqrestore(&nesdev->cqp.lock, flags); } if (cqp_request == NULL) { - cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC); + cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL); if (cqp_request) { cqp_request->dynamic = 1; INIT_LIST_HEAD(&cqp_request->list); diff --git a/trunk/drivers/infiniband/hw/nes/nes_verbs.c b/trunk/drivers/infiniband/hw/nes/nes_verbs.c index a680c42d6e8c..21e0fd336cf7 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_verbs.c +++ b/trunk/drivers/infiniband/hw/nes/nes_verbs.c @@ -667,32 +667,15 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop */ static int nes_query_port(struct ib_device *ibdev, u8 port, struct
ib_port_attr *props) { - struct nes_vnic *nesvnic = to_nesvnic(ibdev); - struct net_device *netdev = nesvnic->netdev; - memset(props, 0, sizeof(*props)); - props->max_mtu = IB_MTU_4096; - - if (netdev->mtu >= 4096) - props->active_mtu = IB_MTU_4096; - else if (netdev->mtu >= 2048) - props->active_mtu = IB_MTU_2048; - else if (netdev->mtu >= 1024) - props->active_mtu = IB_MTU_1024; - else if (netdev->mtu >= 512) - props->active_mtu = IB_MTU_512; - else - props->active_mtu = IB_MTU_256; - + props->max_mtu = IB_MTU_2048; + props->active_mtu = IB_MTU_2048; props->lid = 1; props->lmc = 0; props->sm_lid = 0; props->sm_sl = 0; - if (nesvnic->linkup) - props->state = IB_PORT_ACTIVE; - else - props->state = IB_PORT_DOWN; + props->state = IB_PORT_ACTIVE; props->phys_state = 0; props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; @@ -1522,46 +1505,13 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, } -/** - * nes_clean_cq - */ -static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq) -{ - u32 cq_head; - u32 lo; - u32 hi; - u64 u64temp; - unsigned long flags = 0; - - spin_lock_irqsave(&nescq->lock, flags); - - cq_head = nescq->hw_cq.cq_head; - while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) { - rmb(); - lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); - hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]); - u64temp = (((u64)hi) << 32) | ((u64)lo); - u64temp &= ~(NES_SW_CONTEXT_ALIGN-1); - if (u64temp == (u64)(unsigned long)nesqp) { - /* Zero the context value so cqe will be ignored */ - nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0; - nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0; - } - - if (++cq_head >= nescq->hw_cq.cq_size) - cq_head = 0; - } - - spin_unlock_irqrestore(&nescq->lock, flags); -} - - /** * nes_destroy_qp */ static int nes_destroy_qp(struct ib_qp *ibqp) { struct nes_qp *nesqp = to_nesqp(ibqp); + /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */ struct nes_ucontext *nes_ucontext; struct ib_qp_attr attr; struct iw_cm_id *cm_id; @@ -1598,6 +1548,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret); } + if (nesqp->user_mode) { if ((ibqp->uobject)&&(ibqp->uobject->context)) { nes_ucontext = to_nesucontext(ibqp->uobject->context); @@ -1609,13 +1560,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp) } if (nesqp->pbl_pbase) kunmap(nesqp->page); - } else { - /* Clean any pending completions from the cq(s) */ - if (nesqp->nesscq) - nes_clean_cq(nesqp, nesqp->nesscq); - - if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq)) - nes_clean_cq(nesqp, nesqp->nesrcq); } nes_rem_ref(&nesqp->ibqp); @@ -2940,7 +2884,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, * nes_hw_modify_qp */ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, - u32 next_iwarp_state, u32 termlen, u32 wait_completion) + u32 next_iwarp_state, u32 wait_completion) { struct nes_hw_cqp_wqe *cqp_wqe; /* struct iw_cm_id *cm_id = nesqp->cm_id; */ @@ -2972,13 +2916,6 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase); - /* If sending a terminate 
message, fill in the length (in words) */ - if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) && - !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) { - termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT; - set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen); - } - atomic_set(&cqp_request->refcount, 2); nes_post_cqp_request(nesdev, cqp_request); @@ -3149,9 +3086,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, } nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", nesqp->hwqp.qp_id); - if (nesqp->term_flags) - del_timer(&nesqp->terminate_timer); - next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ if (nesqp->hte_added) { @@ -3229,7 +3163,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (issue_modify_qp) { nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n"); - ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1); + ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1); if (ret) nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)" " failed for QP%u.\n", @@ -3394,12 +3328,6 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, head = nesqp->hwqp.sq_head; while (ib_wr) { - /* Check for QP error */ - if (nesqp->term_flags) { - err = -EINVAL; - break; - } - /* Check for SQ overflow */ if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { err = -EINVAL; @@ -3556,12 +3484,6 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, head = nesqp->hwqp.rq_head; while (ib_wr) { - /* Check for QP error */ - if (nesqp->term_flags) { - err = -EINVAL; - break; - } - if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { err = -EINVAL; break; @@ -3625,6 +3547,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) { u64 u64temp; u64 wrid; + /* u64 u64temp; */ unsigned long flags = 0; struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); struct nes_device *nesdev = nesvnic->nesdev; @@ -3632,13 +3555,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) struct nes_qp *nesqp; struct nes_hw_cqe cqe; u32 head; - u32 wq_tail = 0; + u32 wq_tail; u32 cq_size; u32 cqe_count = 0; u32 wqe_index; u32 u32temp; - u32 move_cq_head = 1; - u32 err_code; + /* u32 counter; */ nes_debug(NES_DBG_CQ, "\n"); @@ -3648,40 +3570,29 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) cq_size = nescq->hw_cq.cq_size; while (cqe_count < num_entries) { - if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & - NES_CQE_VALID) == 0) - break; - - /* - * Make sure we read CQ entry contents *after* - * we've checked the valid bit. - */ - rmb(); - - cqe = nescq->hw_cq.cq_vbase[head]; - u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); - wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1); - u32temp &= ~(NES_SW_CONTEXT_ALIGN-1); - /* parse CQE, get completion context from WQE (either rq or sq) */ - u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | - ((u64)u32temp); - - if (u64temp) { - nesqp = (struct nes_qp *)(unsigned long)u64temp; + if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & + NES_CQE_VALID) { + /* + * Make sure we read CQ entry contents *after* + * we've checked the valid bit. 
+ */ + rmb(); + + cqe = nescq->hw_cq.cq_vbase[head]; + nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; + u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); + wqe_index = u32temp & + (nesdev->nesadapter->max_qp_wr - 1); + u32temp &= ~(NES_SW_CONTEXT_ALIGN-1); + /* parse CQE, get completion context from WQE (either rq or sq */ + u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | + ((u64)u32temp); + nesqp = *((struct nes_qp **)&u64temp); memset(entry, 0, sizeof *entry); if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { entry->status = IB_WC_SUCCESS; } else { - err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]); - if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) { - entry->status = err_code & 0x0000ffff; - - /* The rest of the cqe's will be marked as flushed */ - nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] = - cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) | - NES_IWARP_CQE_MINOR_FLUSH); - } else - entry->status = IB_WC_WR_FLUSH_ERR; + entry->status = IB_WC_WR_FLUSH_ERR; } entry->qp = &nesqp->ibqp; @@ -3690,18 +3601,20 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { if (nesqp->skip_lsmm) { nesqp->skip_lsmm = 0; - nesqp->hwqp.sq_tail++; + wq_tail = nesqp->hwqp.sq_tail++; } /* Working on a SQ Completion*/ - wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index]. + wq_tail = wqe_index; + nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1); + wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail]. wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) | - ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index]. + ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail]. wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX]))); - entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index]. + entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]); - switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index]. + switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) { case NES_IWARP_SQ_OP_RDMAW: nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n"); @@ -3710,7 +3623,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) case NES_IWARP_SQ_OP_RDMAR: nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n"); entry->opcode = IB_WC_RDMA_READ; - entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index]. + entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 
wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]); break; case NES_IWARP_SQ_OP_SENDINV: @@ -3721,54 +3634,33 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) entry->opcode = IB_WC_SEND; break; } - - nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1); - if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) { - move_cq_head = 0; - wq_tail = nesqp->hwqp.sq_tail; - } } else { /* Working on a RQ Completion*/ + wq_tail = wqe_index; + nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1); entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); - wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) | - ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32); + wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) | + ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32); entry->opcode = IB_WC_RECV; - - nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1); - if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) { - move_cq_head = 0; - wq_tail = nesqp->hwqp.rq_tail; - } } - entry->wr_id = wrid; - entry++; - cqe_count++; - } - if (move_cq_head) { - nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; if (++head >= cq_size) head = 0; + cqe_count++; nescq->polled_completions++; - if ((nescq->polled_completions > (cq_size / 2)) || (nescq->polled_completions == 255)) { nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes" - " are pending %u of %u.\n", - nescq->hw_cq.cq_number, nescq->polled_completions, cq_size); + " are pending %u of %u.\n", + nescq->hw_cq.cq_number, nescq->polled_completions, cq_size); nes_write32(nesdev->regs+NES_CQE_ALLOC, - nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); + nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); nescq->polled_completions = 0; } - } else { - /* Update the wqe index and set status to flush */ - wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); - wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail; - nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = - cpu_to_le32(wqe_index); - move_cq_head = 1; /* ready for next pass */ - } + entry++; + } else + break; } if (nescq->polled_completions) { diff --git a/trunk/drivers/infiniband/hw/nes/nes_verbs.h b/trunk/drivers/infiniband/hw/nes/nes_verbs.h index 89822d75f82e..41c07f29f7c9 100644 --- a/trunk/drivers/infiniband/hw/nes/nes_verbs.h +++ b/trunk/drivers/infiniband/hw/nes/nes_verbs.h @@ -40,10 +40,6 @@ struct nes_device; #define NES_MAX_USER_DB_REGIONS 4096 #define NES_MAX_USER_WQ_REGIONS 4096 -#define NES_TERM_SENT 0x01 -#define NES_TERM_RCVD 0x02 -#define NES_TERM_DONE 0x04 - struct nes_ucontext { struct ib_ucontext ibucontext; struct nes_device *nesdev; @@ -123,11 +119,6 @@ struct nes_wq { spinlock_t lock; }; -struct disconn_work { - struct work_struct work; - struct nes_qp *nesqp; -}; - struct iw_cm_id; struct ietf_mpa_frame; @@ -136,6 +127,7 @@ struct nes_qp { void *allocated_buffer; struct iw_cm_id *cm_id; struct workqueue_struct *wq; + struct work_struct disconn_work; struct nes_cq *nesscq; struct nes_cq *nesrcq; struct nes_pd *nespd; @@ -163,13 +155,9 @@ struct nes_qp { void *pbl_vbase; dma_addr_t pbl_pbase; struct page *page; - struct 
timer_list terminate_timer; - enum ib_event_type terminate_eventtype; wait_queue_head_t kick_waitq; u16 in_disconnect; u16 private_data_len; - u16 term_sq_flush_code; - u16 term_rq_flush_code; u8 active_conn; u8 skip_lsmm; u8 user_mode; @@ -177,7 +165,7 @@ struct nes_qp { u8 hw_iwarp_state; u8 flush_issued; u8 hw_tcp_state; - u8 term_flags; + u8 disconn_pending; u8 destroyed; }; #endif /* NES_VERBS_H */ diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 8f4b4fca2a1d..181b1f32325f 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -31,6 +31,7 @@ */ #include +#include #include #include #include diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c index e35f4a0ea9d5..e7e5adf84e84 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -36,6 +36,7 @@ #include #include +#include #include #include diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c index 2bf5116deec4..e319d91f60a6 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -604,11 +604,8 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) skb_queue_len(&neigh->queue)); goto err_drop; } - } else { - spin_unlock_irqrestore(&priv->lock, flags); + } else ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha)); - return; - } } else { neigh->ah = NULL; @@ -691,9 +688,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ipoib_dbg(priv, "Send unicast ARP to %04x\n", be16_to_cpu(path->pathrec.dlid)); - spin_unlock_irqrestore(&priv->lock, flags); ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); - return; } else if ((path->query || !path_rec_start(dev, path)) && skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { /* put pseudoheader back on for next time */ diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 25874fc680c9..a0e97532e714 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -720,9 +720,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) } } - spin_unlock_irqrestore(&priv->lock, flags); ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); - return; } unlock: @@ -760,20 +758,6 @@ void ipoib_mcast_dev_flush(struct net_device *dev) } } -static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen, - const u8 *broadcast) -{ - if (addrlen != INFINIBAND_ALEN) - return 0; - /* reserved QPN, prefix, scope */ - if (memcmp(addr, broadcast, 6)) - return 0; - /* signature lower, pkey */ - if (memcmp(addr + 7, broadcast + 7, 3)) - return 0; - return 1; -} - void ipoib_mcast_restart_task(struct work_struct *work) { struct ipoib_dev_priv *priv = @@ -807,11 +791,6 @@ void ipoib_mcast_restart_task(struct work_struct *work) for (mclist = dev->mc_list; mclist; mclist = mclist->next) { union ib_gid mgid; - if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr, - mclist->dmi_addrlen, - dev->broadcast)) - continue; - memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); mcast = __ipoib_mcast_find(dev, &mgid); diff --git a/trunk/drivers/net/cxgb3/cxgb3_main.c b/trunk/drivers/net/cxgb3/cxgb3_main.c index c97ab82ec743..fb5df5c6203e 100644 --- 
a/trunk/drivers/net/cxgb3/cxgb3_main.c +++ b/trunk/drivers/net/cxgb3/cxgb3_main.c @@ -1286,7 +1286,6 @@ static int cxgb_open(struct net_device *dev) if (!other_ports) schedule_chk_task(adapter); - cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id); return 0; } @@ -1319,7 +1318,6 @@ static int cxgb_close(struct net_device *dev) if (!adapter->open_device_map) cxgb_down(adapter); - cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id); return 0; } @@ -2719,7 +2717,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset) if (is_offload(adapter) && test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { - cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); + cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); offload_close(&adapter->tdev); } @@ -2784,7 +2782,7 @@ static void t3_resume_ports(struct adapter *adapter) } if (is_offload(adapter) && !ofld_disable) - cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); + cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); } /* diff --git a/trunk/drivers/net/cxgb3/cxgb3_offload.c b/trunk/drivers/net/cxgb3/cxgb3_offload.c index 75064eea1d87..f9f54b57b28c 100644 --- a/trunk/drivers/net/cxgb3/cxgb3_offload.c +++ b/trunk/drivers/net/cxgb3/cxgb3_offload.c @@ -153,14 +153,14 @@ void cxgb3_remove_clients(struct t3cdev *tdev) mutex_unlock(&cxgb3_db_lock); } -void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port) +void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error) { struct cxgb3_client *client; mutex_lock(&cxgb3_db_lock); list_for_each_entry(client, &client_list, client_list) { - if (client->event_handler) - client->event_handler(tdev, event, port); + if (client->err_handler) + client->err_handler(tdev, status, error); } mutex_unlock(&cxgb3_db_lock); } diff --git a/trunk/drivers/net/cxgb3/cxgb3_offload.h b/trunk/drivers/net/cxgb3/cxgb3_offload.h index 670aa62042da..55945f422aec 100644 --- a/trunk/drivers/net/cxgb3/cxgb3_offload.h +++ b/trunk/drivers/net/cxgb3/cxgb3_offload.h @@ -64,16 +64,14 @@ void cxgb3_register_client(struct cxgb3_client *client); void cxgb3_unregister_client(struct cxgb3_client *client); void cxgb3_add_clients(struct t3cdev *tdev); void cxgb3_remove_clients(struct t3cdev *tdev); -void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port); +void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error); typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb, void *ctx); enum { OFFLOAD_STATUS_UP, - OFFLOAD_STATUS_DOWN, - OFFLOAD_PORT_DOWN, - OFFLOAD_PORT_UP + OFFLOAD_STATUS_DOWN }; struct cxgb3_client { @@ -84,7 +82,7 @@ struct cxgb3_client { int (*redirect)(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t); struct list_head client_list; - void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port); + void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error); }; /* diff --git a/trunk/drivers/net/mlx4/cq.c b/trunk/drivers/net/mlx4/cq.c index ccfe276943f0..ac57b6a42c6e 100644 --- a/trunk/drivers/net/mlx4/cq.c +++ b/trunk/drivers/net/mlx4/cq.c @@ -34,6 +34,7 @@ * SOFTWARE. */ +#include #include #include diff --git a/trunk/drivers/net/mlx4/eq.c b/trunk/drivers/net/mlx4/eq.c index bffb7995cb70..b9ceddde46c0 100644 --- a/trunk/drivers/net/mlx4/eq.c +++ b/trunk/drivers/net/mlx4/eq.c @@ -31,6 +31,7 @@ * SOFTWARE. 
*/ +#include #include #include #include @@ -40,10 +41,6 @@ #include "mlx4.h" #include "fw.h" -enum { - MLX4_IRQNAME_SIZE = 64 -}; - enum { MLX4_NUM_ASYNC_EQE = 0x100, MLX4_NUM_SPARE_EQE = 0x80, @@ -529,6 +526,48 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev) iounmap(priv->clr_base); } +int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int ret; + + /* + * We assume that mapping one page is enough for the whole EQ + * context table. This is fine with all current HCAs, because + * we only use 32 EQs and each EQ uses 64 bytes of context + * memory, or 1 KB total. + */ + priv->eq_table.icm_virt = icm_virt; + priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER); + if (!priv->eq_table.icm_page) + return -ENOMEM; + priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) { + __free_page(priv->eq_table.icm_page); + return -ENOMEM; + } + + ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt); + if (ret) { + pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->eq_table.icm_page); + } + + return ret; +} + +void mlx4_unmap_eq_icm(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1); + pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->eq_table.icm_page); +} + int mlx4_alloc_eq_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -576,9 +615,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) priv->eq_table.clr_int = priv->clr_base + (priv->eq_table.inta_pin < 32 ? 4 : 0); - priv->eq_table.irq_names = - kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), - GFP_KERNEL); + priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL); if (!priv->eq_table.irq_names) { err = -ENOMEM; goto err_out_bitmap; @@ -601,25 +638,17 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) goto err_out_comp; if (dev->flags & MLX4_FLAG_MSI_X) { + static const char async_eq_name[] = "mlx4-async"; const char *eq_name; for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { if (i < dev->caps.num_comp_vectors) { - snprintf(priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, - "mlx4-comp-%d@pci:%s", i, - pci_name(dev->pdev)); - } else { - snprintf(priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, - "mlx4-async@pci:%s", - pci_name(dev->pdev)); - } + snprintf(priv->eq_table.irq_names + i * 16, 16, + "mlx4-comp-%d", i); + eq_name = priv->eq_table.irq_names + i * 16; + } else + eq_name = async_eq_name; - eq_name = priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE; err = request_irq(priv->eq_table.eq[i].irq, mlx4_msi_x_interrupt, 0, eq_name, priv->eq_table.eq + i); @@ -629,12 +658,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) priv->eq_table.eq[i].have_irq = 1; } } else { - snprintf(priv->eq_table.irq_names, - MLX4_IRQNAME_SIZE, - DRV_NAME "@pci:%s", - pci_name(dev->pdev)); err = request_irq(dev->pdev->irq, mlx4_interrupt, - IRQF_SHARED, priv->eq_table.irq_names, dev); + IRQF_SHARED, DRV_NAME, dev); if (err) goto err_out_async; diff --git a/trunk/drivers/net/mlx4/icm.c b/trunk/drivers/net/mlx4/icm.c index 04b382fcb8c8..baf4bf66062c 100644 --- a/trunk/drivers/net/mlx4/icm.c +++ b/trunk/drivers/net/mlx4/icm.c @@ -31,6 +31,7 @@ * SOFTWARE. 
*/ +#include #include #include #include diff --git a/trunk/drivers/net/mlx4/main.c b/trunk/drivers/net/mlx4/main.c index 3dd481e77f92..dac621b1e9fc 100644 --- a/trunk/drivers/net/mlx4/main.c +++ b/trunk/drivers/net/mlx4/main.c @@ -525,10 +525,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, goto err_unmap_aux; } - err = mlx4_init_icm_table(dev, &priv->eq_table.table, - init_hca->eqc_base, dev_cap->eqc_entry_sz, - dev->caps.num_eqs, dev->caps.num_eqs, - 0, 0); + err = mlx4_map_eq_icm(dev, init_hca->eqc_base); if (err) { mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); goto err_unmap_cmpt; @@ -671,7 +668,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); err_unmap_eq: - mlx4_cleanup_icm_table(dev, &priv->eq_table.table); + mlx4_unmap_eq_icm(dev); err_unmap_cmpt: mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); @@ -701,11 +698,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev) mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); - mlx4_cleanup_icm_table(dev, &priv->eq_table.table); mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + mlx4_unmap_eq_icm(dev); mlx4_UNMAP_ICM_AUX(dev); mlx4_free_icm(dev, priv->fw.aux_icm, 0); @@ -789,7 +786,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) return 0; err_close: - mlx4_CLOSE_HCA(dev, 0); + mlx4_close_hca(dev); err_free_icm: mlx4_free_icms(dev); @@ -1073,12 +1070,18 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) goto err_disable_pdev; } - err = pci_request_regions(pdev, DRV_NAME); + err = pci_request_region(pdev, 0, DRV_NAME); if (err) { - dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); + dev_err(&pdev->dev, "Cannot request control region, aborting.\n"); goto err_disable_pdev; } + err = pci_request_region(pdev, 2, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n"); + goto err_release_bar0; + } + pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); @@ -1087,7 +1090,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); - goto err_release_regions; + goto err_release_bar2; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); @@ -1098,7 +1101,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (err) { dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " "aborting.\n"); - goto err_release_regions; + goto err_release_bar2; } } @@ -1107,7 +1110,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(&pdev->dev, "Device struct alloc failed, " "aborting.\n"); err = -ENOMEM; - goto err_release_regions; + goto err_release_bar2; } dev = &priv->dev; @@ -1202,8 +1205,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) err_free_dev: kfree(priv); -err_release_regions: - pci_release_regions(pdev); +err_release_bar2: + pci_release_region(pdev, 2); + +err_release_bar0: + pci_release_region(pdev, 0); err_disable_pdev: pci_disable_device(pdev); @@ -1259,7 +1265,8 
@@ static void mlx4_remove_one(struct pci_dev *pdev) pci_disable_msix(pdev); kfree(priv); - pci_release_regions(pdev); + pci_release_region(pdev, 2); + pci_release_region(pdev, 0); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/trunk/drivers/net/mlx4/mcg.c b/trunk/drivers/net/mlx4/mcg.c index 5ccbce9866fe..6053c357a470 100644 --- a/trunk/drivers/net/mlx4/mcg.c +++ b/trunk/drivers/net/mlx4/mcg.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include #include #include diff --git a/trunk/drivers/net/mlx4/mlx4.h b/trunk/drivers/net/mlx4/mlx4.h index bc72d6e4919b..5bd79c2b184f 100644 --- a/trunk/drivers/net/mlx4/mlx4.h +++ b/trunk/drivers/net/mlx4/mlx4.h @@ -205,7 +205,9 @@ struct mlx4_eq_table { void __iomem **uar_map; u32 clr_mask; struct mlx4_eq *eq; - struct mlx4_icm_table table; + u64 icm_virt; + struct page *icm_page; + dma_addr_t icm_dma; struct mlx4_icm_table cmpt_table; int have_irq; u8 inta_pin; @@ -371,6 +373,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, struct mlx4_init_hca_param *init_hca); +int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt); +void mlx4_unmap_eq_icm(struct mlx4_dev *dev); + int mlx4_cmd_init(struct mlx4_dev *dev); void mlx4_cmd_cleanup(struct mlx4_dev *dev); void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); diff --git a/trunk/drivers/net/mlx4/mr.c b/trunk/drivers/net/mlx4/mr.c index ca7ab8e7b4cc..f96948be0a44 100644 --- a/trunk/drivers/net/mlx4/mr.c +++ b/trunk/drivers/net/mlx4/mr.c @@ -32,6 +32,7 @@ * SOFTWARE. */ +#include #include #include diff --git a/trunk/drivers/net/mlx4/pd.c b/trunk/drivers/net/mlx4/pd.c index c4988d6bd5b2..26d1a7a9e375 100644 --- a/trunk/drivers/net/mlx4/pd.c +++ b/trunk/drivers/net/mlx4/pd.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include #include #include diff --git a/trunk/drivers/net/mlx4/profile.c b/trunk/drivers/net/mlx4/profile.c index ca25b9dc8378..bd22df95adf9 100644 --- a/trunk/drivers/net/mlx4/profile.c +++ b/trunk/drivers/net/mlx4/profile.c @@ -32,6 +32,8 @@ * SOFTWARE. */ +#include + #include "mlx4.h" #include "fw.h" diff --git a/trunk/drivers/net/mlx4/qp.c b/trunk/drivers/net/mlx4/qp.c index 42ab9fc01d3e..1c565ef8d179 100644 --- a/trunk/drivers/net/mlx4/qp.c +++ b/trunk/drivers/net/mlx4/qp.c @@ -33,6 +33,8 @@ * SOFTWARE. */ +#include + #include #include diff --git a/trunk/drivers/net/mlx4/reset.c b/trunk/drivers/net/mlx4/reset.c index e5741dab3825..3951b884c0fb 100644 --- a/trunk/drivers/net/mlx4/reset.c +++ b/trunk/drivers/net/mlx4/reset.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include #include #include #include diff --git a/trunk/drivers/net/mlx4/srq.c b/trunk/drivers/net/mlx4/srq.c index 1377d0dc8f1f..fe9f218691f5 100644 --- a/trunk/drivers/net/mlx4/srq.c +++ b/trunk/drivers/net/mlx4/srq.c @@ -31,6 +31,8 @@ * SOFTWARE. 
*/ +#include + #include #include "mlx4.h" diff --git a/trunk/drivers/scsi/cxgb3i/cxgb3i_init.c b/trunk/drivers/scsi/cxgb3i/cxgb3i_init.c index d0ab23a58355..042d9bce9914 100644 --- a/trunk/drivers/scsi/cxgb3i/cxgb3i_init.c +++ b/trunk/drivers/scsi/cxgb3i/cxgb3i_init.c @@ -26,7 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION); static void open_s3_dev(struct t3cdev *); static void close_s3_dev(struct t3cdev *); -static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port); +static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error); static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; static struct cxgb3_client t3c_client = { @@ -34,7 +34,7 @@ static struct cxgb3_client t3c_client = { .handlers = cxgb3i_cpl_handlers, .add = open_s3_dev, .remove = close_s3_dev, - .event_handler = s3_event_handler, + .err_handler = s3_err_handler, }; /** @@ -66,16 +66,16 @@ static void close_s3_dev(struct t3cdev *t3dev) cxgb3i_ddp_cleanup(t3dev); } -static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port) +static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error) { struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev); - cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n", - snic, tdev, event, port); + cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n", + snic, tdev, status, error); if (!snic) return; - switch (event) { + switch (status) { case OFFLOAD_STATUS_DOWN: snic->flags |= CXGB3I_ADAPTER_FLAG_RESET; break;
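The cxgb3 hunks above roll the client callback back from the four-value event notifier (OFFLOAD_STATUS_UP/DOWN plus OFFLOAD_PORT_UP/DOWN) to the original two-status err_handler, and cxgb3i follows suit by renaming s3_event_handler() to s3_err_handler(). After this change only adapter-wide status transitions reach clients; the per-port notifications from cxgb_open() and cxgb_close() are gone. A client that only cares about adapter reset registers the callback as sketched below; a minimal sketch assuming the cxgb3_offload.h declarations shown above, with an illustrative client name and handler body, and with the .add/.remove/.handlers fields a real client also fills in omitted for brevity:

#include <linux/init.h>
#include <linux/module.h>

#include "cxgb3_offload.h"

static void my_err_handler(struct t3cdev *tdev, u32 status, u32 error)
{
	switch (status) {
	case OFFLOAD_STATUS_DOWN:
		/* adapter is resetting: quiesce offload work on tdev */
		break;
	case OFFLOAD_STATUS_UP:
		/* adapter recovered: safe to resume offload work */
		break;
	}
}

static struct cxgb3_client my_client = {
	.name        = "my_offload_client",	/* illustrative */
	.err_handler = my_err_handler,
};

static int __init my_client_init(void)
{
	cxgb3_register_client(&my_client);
	return 0;
}

static void __exit my_client_exit(void)
{
	cxgb3_unregister_client(&my_client);
}

module_init(my_client_init);
module_exit(my_client_exit);
MODULE_LICENSE("GPL");

cxgb3_err_notify() walks the registered client list under cxgb3_db_lock and invokes each non-NULL err_handler, which is why a client that leaves the field unset (as the diff shows for clients predating the callback) is simply skipped.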