Commit 8f9e850

---
r: 33454
b: refs/heads/master
c: a19aa5c
h: refs/heads/master
v: v3
Roland Dreier committed Aug 11, 2006
1 parent 12dcb45 commit 8f9e850
Showing 3 changed files with 33 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e54b82d739d4a2ef992976c8c0692cdf89286420
+refs/heads/master: a19aa5c5fdda8b556ab238177ee27c5ef7873c94
4 changes: 2 additions & 2 deletions trunk/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -136,8 +136,8 @@ struct mthca_ah {
  * We have one global lock that protects dev->cq/qp_table. Each
  * struct mthca_cq/qp also has its own lock. An individual qp lock
  * may be taken inside of an individual cq lock. Both cqs attached to
- * a qp may be locked, with the send cq locked first. No other
- * nesting should be done.
+ * a qp may be locked, with the cq with the lower cqn locked first.
+ * No other nesting should be done.
  *
  * Each struct mthca_cq/qp also has an ref count, protected by the
  * corresponding table lock. The pointer from the cq/qp_table to the
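The comment change above captures the substance of the patch: under the old "send cq locked first" rule, two CPUs working on two QPs that share the same pair of CQs in swapped send/receive roles could take the two locks in opposite orders, the classic AB-BA deadlock. Ordering acquisitions by a stable key, here the CQ number, gives every path one global lock order. A minimal userspace sketch of the same idiom, assuming hypothetical pthread-based cq structs rather than the driver's types:

#include <pthread.h>

/* Hypothetical stand-in for a CQ: just a lock plus a stable id (the cqn). */
struct cq {
	pthread_mutex_t lock;
	unsigned int    cqn;
};

/*
 * Take both locks in canonical order: lower cqn first.  Because every
 * caller follows the same global order, no two callers can ever hold
 * one lock each while waiting for the other.
 */
static void lock_cq_pair(struct cq *send_cq, struct cq *recv_cq)
{
	if (send_cq == recv_cq) {
		pthread_mutex_lock(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		pthread_mutex_lock(&send_cq->lock);
		pthread_mutex_lock(&recv_cq->lock);
	} else {
		pthread_mutex_lock(&recv_cq->lock);
		pthread_mutex_lock(&send_cq->lock);
	}
}

static void unlock_cq_pair(struct cq *send_cq, struct cq *recv_cq)
{
	pthread_mutex_unlock(&send_cq->lock);
	if (send_cq != recv_cq)
		pthread_mutex_unlock(&recv_cq->lock);
}

The old rule deadlocks exactly when two threads see the same two CQs with roles swapped: thread A locks cq1 then cq2 while thread B locks cq2 then cq1. Any stable total order fixes this; ordering by pointer address is a common alternative to an id like cqn.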
42 changes: 30 additions & 12 deletions trunk/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1263,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 	return 0;
 }
 
+static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_lock_irq(&send_cq->lock);
+	else if (send_cq->cqn < recv_cq->cqn) {
+		spin_lock_irq(&send_cq->lock);
+		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irq(&recv_cq->lock);
+		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_unlock_irq(&send_cq->lock);
+	else if (send_cq->cqn < recv_cq->cqn) {
+		spin_unlock(&recv_cq->lock);
+		spin_unlock_irq(&send_cq->lock);
+	} else {
+		spin_unlock(&send_cq->lock);
+		spin_unlock_irq(&recv_cq->lock);
+	}
+}
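A note on the helpers above: both CQ locks belong to the same lockdep class, so taking the second lock with a plain spin_lock() would trigger a false "possible recursive locking" report even though the cqn ordering makes the code deadlock-free. spin_lock_nested(..., SINGLE_DEPTH_NESTING) annotates the inner acquisition as a deliberate, bounded nesting within one class. The inner lock also does not need spin_lock_irq(), since interrupts are already disabled by the outer spin_lock_irq(); mthca_unlock_cqs() mirrors that asymmetry, releasing the inner lock with spin_unlock() and re-enabling interrupts only when the outer lock is dropped with spin_unlock_irq().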

 int mthca_alloc_sqp(struct mthca_dev *dev,
 		    struct mthca_pd *pd,
 		    struct mthca_cq *send_cq,

@@ -1315,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	 * Lock CQs here, so that CQ polling code can do QP lookup
 	 * without taking a lock.
 	 */
-	spin_lock_irq(&send_cq->lock);
-	if (send_cq != recv_cq)
-		spin_lock(&recv_cq->lock);
+	mthca_lock_cqs(send_cq, recv_cq);
 
 	spin_lock(&dev->qp_table.lock);
 	mthca_array_clear(&dev->qp_table.qp, mqpn);
 	spin_unlock(&dev->qp_table.lock);
 
-	if (send_cq != recv_cq)
-		spin_unlock(&recv_cq->lock);
-	spin_unlock_irq(&send_cq->lock);
+	mthca_unlock_cqs(send_cq, recv_cq);
 
 err_out:
 	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1359,19 +1381,15 @@ void mthca_free_qp(struct mthca_dev *dev,
 	 * Lock CQs here, so that CQ polling code can do QP lookup
 	 * without taking a lock.
 	 */
-	spin_lock_irq(&send_cq->lock);
-	if (send_cq != recv_cq)
-		spin_lock(&recv_cq->lock);
+	mthca_lock_cqs(send_cq, recv_cq);
 
 	spin_lock(&dev->qp_table.lock);
 	mthca_array_clear(&dev->qp_table.qp,
 			  qp->qpn & (dev->limits.num_qps - 1));
 	--qp->refcount;
 	spin_unlock(&dev->qp_table.lock);
 
-	if (send_cq != recv_cq)
-		spin_unlock(&recv_cq->lock);
-	spin_unlock_irq(&send_cq->lock);
+	mthca_unlock_cqs(send_cq, recv_cq);
 
 	wait_event(qp->wait, !get_qp_refcount(dev, qp));
 
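The wait_event() at the end of mthca_free_qp() is the other half of the reference-counting scheme described in the mthca_provider.h comment: the table lock protects the refcount, pollers that looked the QP up through cq/qp_table hold a reference, and the destroyer sleeps until the count drains before tearing the QP down. A rough userspace analogue of that pattern, assuming a hypothetical obj type with pthread primitives standing in for the table spinlock and wait queue:

#include <pthread.h>

struct obj {
	pthread_mutex_t lock;     /* stands in for dev->qp_table.lock */
	pthread_cond_t  drained;  /* stands in for qp->wait */
	int             refcount; /* protected by lock */
};

/* A poller takes a reference before touching the object... */
static void obj_get(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->refcount++;
	pthread_mutex_unlock(&o->lock);
}

/* ...and wakes any waiting destroyer when the last reference drops. */
static void obj_put(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (--o->refcount == 0)
		pthread_cond_broadcast(&o->drained);
	pthread_mutex_unlock(&o->lock);
}

/*
 * The destroyer unpublishes the object, drops the creation reference,
 * then sleeps until all in-flight users are gone, the analogue of
 * mthca_free_qp()'s wait_event() on get_qp_refcount().
 */
static void obj_destroy(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->refcount--;
	while (o->refcount > 0)
		pthread_cond_wait(&o->drained, &o->lock);
	pthread_mutex_unlock(&o->lock);
	/* now safe to free the object's resources */
}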
