diff --git a/[refs] b/[refs]
index be2b3b5a8393..51fd35e47a3e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: db4106ce635830201fad1bfca731a635beab6a72
+refs/heads/master: 91018f8632e09e3a617c9fc2efbbdaa2922d2fe7
diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 6de8463f453b..bea5839d89ee 100644
--- a/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-		       struct iwch_cq *schp)
+		       struct iwch_cq *schp, unsigned long *flag)
 {
 	int count;
 	int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
-	spin_unlock(&qhp->lock);
+	spin_unlock_irqrestore(&qhp->lock, *flag);
 
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock(&rchp->lock);
+	spin_lock_irqsave(&rchp->lock, *flag);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&rchp->cq);
 	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock(&rchp->lock);
+	spin_unlock_irqrestore(&rchp->lock, *flag);
 	if (flushed) {
-		spin_lock(&rchp->comp_handler_lock);
+		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock(&rchp->comp_handler_lock);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
 	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock(&schp->lock);
+	spin_lock_irqsave(&schp->lock, *flag);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&schp->cq);
 	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock(&schp->lock);
+	spin_unlock_irqrestore(&schp->lock, *flag);
 	if (flushed) {
-		spin_lock(&schp->comp_handler_lock);
+		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock(&schp->comp_handler_lock);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
 	}
 
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
 
-	spin_lock(&qhp->lock);
+	spin_lock_irqsave(&qhp->lock, *flag);
 }
 
-static void flush_qp(struct iwch_qp *qhp)
+static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
 	struct iwch_cq *rchp, *schp;
 
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
-		spin_lock(&rchp->comp_handler_lock);
+		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock(&rchp->comp_handler_lock);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
-			spin_lock(&schp->comp_handler_lock);
+			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
-			spin_unlock(&schp->comp_handler_lock);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
 		}
 		return;
 	}
-	__flush_qp(qhp, rchp, schp);
+	__flush_qp(qhp, rchp, schp, flag);
 }
 
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 			break;
 		case IWCH_QP_STATE_ERROR:
 			qhp->attr.state = IWCH_QP_STATE_ERROR;
-			flush_qp(qhp);
+			flush_qp(qhp, &flag);
 			break;
 		default:
 			ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		}
 		switch (attrs->next_state) {
 		case IWCH_QP_STATE_IDLE:
-			flush_qp(qhp);
+			flush_qp(qhp, &flag);
 			qhp->attr.state = IWCH_QP_STATE_IDLE;
 			qhp->attr.llp_stream_handle = NULL;
 			put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	free=1;
 	wake_up(&qhp->wait);
 	BUG_ON(!ep);
-	flush_qp(qhp);
+	flush_qp(qhp, &flag);
 out:
 	spin_unlock_irqrestore(&qhp->lock, flag);
 
diff --git a/trunk/drivers/infiniband/hw/cxgb4/cm.c b/trunk/drivers/infiniband/hw/cxgb4/cm.c
index 0668bb3472d0..006a35372b7a 100644
--- a/trunk/drivers/infiniband/hw/cxgb4/cm.c
+++ b/trunk/drivers/infiniband/hw/cxgb4/cm.c
@@ -1114,7 +1114,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
 	 */
-	if ((ep->mpa_attr.version == 2) &&
+	if ((ep->mpa_attr.version == 2) && peer2peer &&
 	    (ep->mpa_attr.p2p_type != p2p_type)) {
 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
 		rtr_mismatch = 1;
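The cxgb3 hunks above follow a common kernel pattern: the caller (iwch_modify_qp, per the "out:" label's spin_unlock_irqrestore) takes qhp->lock with spin_lock_irqsave(), so a helper that must drop and re-take locks has to use the _irqsave/_irqrestore variants with the caller's saved flags; plain spin_unlock()/spin_lock() would re-enable interrupts while the caller still expects them disabled. Below is a minimal sketch of that flags-threading pattern, not taken from the driver; the names demo_qp and demo_flush are hypothetical.

/*
 * Sketch of the pattern the patch applies: the caller holds qp->lock
 * via spin_lock_irqsave() and passes the saved flags down, so the
 * helper can release and re-acquire locks without clobbering the
 * caller's interrupt state.
 */
#include <linux/spinlock.h>

struct demo_qp {
	spinlock_t lock;
};

/* Assumes qp->lock is held and *flags holds the caller's saved state. */
static void demo_flush(struct demo_qp *qp, spinlock_t *cq_lock,
		       unsigned long *flags)
{
	spin_unlock_irqrestore(&qp->lock, *flags);

	/* locking hierarchy: cq lock first, then qp lock */
	spin_lock_irqsave(cq_lock, *flags);
	spin_lock(&qp->lock);
	/* ... flush work done here under both locks ... */
	spin_unlock(&qp->lock);
	spin_unlock_irqrestore(cq_lock, *flags);

	/* re-acquire qp->lock so the caller's final unlock still balances */
	spin_lock_irqsave(&qp->lock, *flags);
}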