From 0af883a1067fba6df59c660a1616d7bdc346c941 Mon Sep 17 00:00:00 2001
From: Steve Wise
Date: Thu, 6 Oct 2011 09:32:44 -0700
Subject: [PATCH] --- yaml ---

r: 272324
b: refs/heads/master
c: 9efe10a1e1a1ab1dba0af0f520e0697f6e81ebf1
h: refs/heads/master
v: v3
---
 [refs]                                      |  2 +-
 trunk/drivers/infiniband/hw/cxgb3/iwch_ev.c |  6 ---
 .../infiniband/hw/cxgb3/iwch_provider.c     |  1 -
 .../infiniband/hw/cxgb3/iwch_provider.h     |  1 -
 trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c | 14 +------
 trunk/drivers/infiniband/hw/cxgb4/device.c  | 41 +++++++++++++++----
 6 files changed, 36 insertions(+), 29 deletions(-)

diff --git a/[refs] b/[refs]
index 2b24db33e556..ed8b580b03e2 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f7cc25d018f1e9af6767ee7774bbe83452e9fdf4
+refs/heads/master: 9efe10a1e1a1ab1dba0af0f520e0697f6e81ebf1
diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_ev.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_ev.c
index abcc9e76962b..71e0d845da3d 100644
--- a/trunk/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_ev.c
@@ -46,7 +46,6 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	struct ib_event event;
 	struct iwch_qp_attributes attrs;
 	struct iwch_qp *qhp;
-	unsigned long flag;
 
 	spin_lock(&rnicp->lock);
 	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
@@ -95,9 +94,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
-	spin_lock_irqsave(&chp->comp_handler_lock, flag);
 	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
@@ -110,7 +107,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	struct iwch_cq *chp;
 	struct iwch_qp *qhp;
 	u32 cqid = RSPQ_CQID(rsp_msg);
-	unsigned long flag;
 
 	rnicp = (struct iwch_dev *) rdev_p->ulp;
 	spin_lock(&rnicp->lock);
@@ -174,9 +170,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 		 */
 		if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
 			dst_confirm(qhp->ep->dst);
-		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 		break;
 
 	case TPT_ERR_STAG:
diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 37c224fc3ad9..c7d9411f2954 100644
--- a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -190,7 +190,6 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 	chp->rhp = rhp;
 	chp->ibcq.cqe = 1 << chp->cq.size_log2;
 	spin_lock_init(&chp->lock);
-	spin_lock_init(&chp->comp_handler_lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
 	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.h b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 87c14b0c5ac0..9a342c9b220d 100644
--- a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -103,7 +103,6 @@ struct iwch_cq {
 	struct iwch_dev *rhp;
 	struct t3_cq cq;
 	spinlock_t lock;
-	spinlock_t comp_handler_lock;
 	atomic_t refcnt;
 	wait_queue_head_t wait;
 	u32 __user *user_rptr_addr;
diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
index bea5839d89ee..ecd313f359a4 100644
--- a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -822,11 +822,8 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, *flag);
-	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+	if (flushed)
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
-	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, *flag);
@@ -836,11 +833,8 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, *flag);
-	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+	if (flushed)
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
-	}
 
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
@@ -859,15 +853,11 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
-			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
-			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
 		}
 		return;
 	}
diff --git a/trunk/drivers/infiniband/hw/cxgb4/device.c b/trunk/drivers/infiniband/hw/cxgb4/device.c
index 40a13cc633a3..6d0df6ec161b 100644
--- a/trunk/drivers/infiniband/hw/cxgb4/device.c
+++ b/trunk/drivers/infiniband/hw/cxgb4/device.c
@@ -376,10 +376,8 @@ struct uld_ctx {
 	struct c4iw_dev *dev;
 };
 
-static void c4iw_remove(struct uld_ctx *ctx)
+static void c4iw_dealloc(struct uld_ctx *ctx)
 {
-	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
-	c4iw_unregister_device(ctx->dev);
 	c4iw_rdev_close(&ctx->dev->rdev);
 	idr_destroy(&ctx->dev->cqidr);
 	idr_destroy(&ctx->dev->qpidr);
@@ -389,11 +387,30 @@ static void c4iw_remove(struct uld_ctx *ctx)
 	ctx->dev = NULL;
 }
 
+static void c4iw_remove(struct uld_ctx *ctx)
+{
+	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+	c4iw_unregister_device(ctx->dev);
+	c4iw_dealloc(ctx);
+}
+
+static int rdma_supported(const struct cxgb4_lld_info *infop)
+{
+	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
+	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
+	       infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+}
+
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 {
 	struct c4iw_dev *devp;
 	int ret;
 
+	if (!rdma_supported(infop)) {
+		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
+		       pci_name(infop->pdev));
+		return ERR_PTR(-ENOSYS);
+	}
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -414,7 +431,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
-		mutex_unlock(&dev_mutex);
 		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
 		return ERR_PTR(ret);
 	}
@@ -519,15 +535,24 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	case CXGB4_STATE_UP:
 		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
 		if (!ctx->dev) {
-			int ret = 0;
+			int ret;
 
 			ctx->dev = c4iw_alloc(&ctx->lldi);
-			if (!IS_ERR(ctx->dev))
-				ret = c4iw_register_device(ctx->dev);
-			if (IS_ERR(ctx->dev) || ret)
+			if (IS_ERR(ctx->dev)) {
+				printk(KERN_ERR MOD
+				       "%s: initialization failed: %ld\n",
+				       pci_name(ctx->lldi.pdev),
+				       PTR_ERR(ctx->dev));
+				ctx->dev = NULL;
+				break;
+			}
+			ret = c4iw_register_device(ctx->dev);
+			if (ret) {
 				printk(KERN_ERR MOD
 				       "%s: RDMA registration failed: %d\n",
 				       pci_name(ctx->lldi.pdev), ret);
+				c4iw_dealloc(ctx);
+			}
 		}
 		break;
 	case CXGB4_STATE_DOWN: