---
r: 96712
b: refs/heads/master
c: e509be8
h: refs/heads/master
v: v3
Ralph Campbell authored and Roland Dreier committed May 13, 2008
1 parent 46af667 commit da06c81
Showing 9 changed files with 555 additions and 305 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 53dc1ca194c062aa9771e194047f27ec1ca592df
refs/heads/master: e509be898d8937634437caa474b57ac12795e5bc
183 changes: 91 additions & 92 deletions trunk/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -242,7 +242,6 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
struct ipath_qp *q, **qpp;
unsigned long flags;
int fnd = 0;

spin_lock_irqsave(&qpt->lock, flags);

@@ -253,51 +252,40 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
*qpp = qp->next;
qp->next = NULL;
atomic_dec(&qp->refcount);
fnd = 1;
break;
}
}

spin_unlock_irqrestore(&qpt->lock, flags);

if (!fnd)
return;

free_qpn(qpt, qp->ibqp.qp_num);

wait_event(qp->wait, !atomic_read(&qp->refcount));
}

/**
* ipath_free_all_qps - remove all QPs from the table
* ipath_free_all_qps - check for QPs still in use
* @qpt: the QP table to empty
*
* There should not be any QPs still in use.
* Free memory for table.
*/
void ipath_free_all_qps(struct ipath_qp_table *qpt)
unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
{
unsigned long flags;
struct ipath_qp *qp, *nqp;
u32 n;
struct ipath_qp *qp;
u32 n, qp_inuse = 0;

spin_lock_irqsave(&qpt->lock, flags);
for (n = 0; n < qpt->max; n++) {
spin_lock_irqsave(&qpt->lock, flags);
qp = qpt->table[n];
qpt->table[n] = NULL;
spin_unlock_irqrestore(&qpt->lock, flags);

while (qp) {
nqp = qp->next;
free_qpn(qpt, qp->ibqp.qp_num);
if (!atomic_dec_and_test(&qp->refcount) ||
!ipath_destroy_qp(&qp->ibqp))
ipath_dbg("QP memory leak!\n");
qp = nqp;
}

for (; qp; qp = qp->next)
qp_inuse++;
}
spin_unlock_irqrestore(&qpt->lock, flags);

for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
if (qpt->map[n].page)
free_page((unsigned long)qpt->map[n].page);
}
free_page((unsigned long) qpt->map[n].page);
return qp_inuse;
}
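
The rewritten ipath_free_all_qps() no longer tears QPs down itself; it drops the per-QP freeing, counts table entries that are still live, frees the QPN map pages, and returns the in-use count. A device-teardown caller could use that return value to flag leaked QPs, along the lines of this sketch (the caller, including the use of ipath_dev_err() and the dd pointer, is an assumption for illustration and not part of this commit):

/* Sketch: report QPs that were never destroyed before the table goes away. */
static void teardown_qp_table_sketch(struct ipath_ibdev *dev,
				     struct ipath_devdata *dd)
{
	unsigned qps_inuse = ipath_free_all_qps(&dev->qp_table);

	if (qps_inuse)
		ipath_dev_err(dd, "QPs still in use: %u\n", qps_inuse);
}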

/**
@@ -336,11 +324,12 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
qp->remote_qpn = 0;
qp->qkey = 0;
qp->qp_access_flags = 0;
qp->s_busy = 0;
atomic_set(&qp->s_dma_busy, 0);
qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
qp->s_hdrwords = 0;
qp->s_wqe = NULL;
qp->s_pkt_delay = 0;
qp->s_draining = 0;
qp->s_psn = 0;
qp->r_psn = 0;
qp->r_msn = 0;
@@ -353,15 +342,15 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
}
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
qp->r_nak_state = 0;
qp->r_wrid_valid = 0;
qp->r_aflags = 0;
qp->r_flags = 0;
qp->s_rnr_timeout = 0;
qp->s_head = 0;
qp->s_tail = 0;
qp->s_cur = 0;
qp->s_last = 0;
qp->s_ssn = 1;
qp->s_lsn = 0;
qp->s_wait_credit = 0;
memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
@@ -370,7 +359,6 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
qp->r_rq.wq->head = 0;
qp->r_rq.wq->tail = 0;
}
qp->r_reuse_sge = 0;
}

/**
@@ -402,39 +390,21 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
list_del_init(&qp->piowait);
spin_unlock(&dev->pending_lock);

wc.vendor_err = 0;
wc.byte_len = 0;
wc.imm_data = 0;
/* Schedule the sending tasklet to drain the send work queue. */
if (qp->s_last != qp->s_head)
ipath_schedule_send(qp);

memset(&wc, 0, sizeof(wc));
wc.qp = &qp->ibqp;
wc.src_qp = 0;
wc.wc_flags = 0;
wc.pkey_index = 0;
wc.slid = 0;
wc.sl = 0;
wc.dlid_path_bits = 0;
wc.port_num = 0;
if (qp->r_wrid_valid) {
qp->r_wrid_valid = 0;
wc.opcode = IB_WC_RECV;

if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
wc.wr_id = qp->r_wr_id;
wc.opcode = IB_WC_RECV;
wc.status = err;
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
}
wc.status = IB_WC_WR_FLUSH_ERR;

while (qp->s_last != qp->s_head) {
struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

wc.wr_id = wqe->wr.wr_id;
wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
if (++qp->s_last >= qp->s_size)
qp->s_last = 0;
ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
}
qp->s_cur = qp->s_tail = qp->s_head;
qp->s_hdrwords = 0;
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

if (qp->r_rq.wq) {
struct ipath_rwq *wq;
u32 head;
@@ -450,7 +420,6 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
tail = wq->tail;
if (tail >= qp->r_rq.size)
tail = 0;
wc.opcode = IB_WC_RECV;
while (tail != head) {
wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
if (++tail >= qp->r_rq.size)
@@ -482,11 +451,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ipath_ibdev *dev = to_idev(ibqp->device);
struct ipath_qp *qp = to_iqp(ibqp);
enum ib_qp_state cur_state, new_state;
unsigned long flags;
int lastwqe = 0;
int ret;

spin_lock_irqsave(&qp->s_lock, flags);
spin_lock_irq(&qp->s_lock);

cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : qp->state;
@@ -539,16 +507,42 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,

switch (new_state) {
case IB_QPS_RESET:
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->timerwait))
list_del_init(&qp->timerwait);
if (!list_empty(&qp->piowait))
list_del_init(&qp->piowait);
spin_unlock(&dev->pending_lock);
qp->s_flags &= ~IPATH_S_ANY_WAIT;
spin_unlock_irq(&qp->s_lock);
/* Stop the sending tasklet */
tasklet_kill(&qp->s_task);
wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
spin_lock_irq(&qp->s_lock);
}
ipath_reset_qp(qp, ibqp->qp_type);
break;

case IB_QPS_SQD:
qp->s_draining = qp->s_last != qp->s_cur;
qp->state = new_state;
break;

case IB_QPS_SQE:
if (qp->ibqp.qp_type == IB_QPT_RC)
goto inval;
qp->state = new_state;
break;

case IB_QPS_ERR:
lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
break;

default:
qp->state = new_state;
break;

}

if (attr_mask & IB_QP_PKEY_INDEX)
@@ -601,8 +595,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
qp->s_max_rd_atomic = attr->max_rd_atomic;

qp->state = new_state;
spin_unlock_irqrestore(&qp->s_lock, flags);
spin_unlock_irq(&qp->s_lock);

if (lastwqe) {
struct ib_event ev;
@@ -616,7 +609,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto bail;

inval:
spin_unlock_irqrestore(&qp->s_lock, flags);
spin_unlock_irq(&qp->s_lock);
ret = -EINVAL;

bail:
@@ -647,7 +640,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->pkey_index = qp->s_pkey_index;
attr->alt_pkey_index = 0;
attr->en_sqd_async_notify = 0;
attr->sq_draining = 0;
attr->sq_draining = qp->s_draining;
attr->max_rd_atomic = qp->s_max_rd_atomic;
attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
attr->min_rnr_timer = qp->r_min_rnr_timer;
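
With this hunk ipath_query_qp() reports the real drain status: attr->sq_draining now comes from the new qp->s_draining flag, which the SQD transition above sets while send WQEs are still outstanding, instead of being hard-coded to zero. A consumer can therefore move a QP to SQD and tell when the send queue has actually drained; a minimal polling sketch using the core verbs API (an assumption for illustration, not code from this commit) looks like:

/* Sketch: drain a QP's send queue by moving it to SQD and polling
 * sq_draining.  A real ULP would normally wait for the
 * IB_EVENT_SQ_DRAINED async event rather than poll. */
static int drain_sq_sketch(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_SQD };
	struct ib_qp_init_attr init_attr;
	int ret;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;
	do {
		msleep(1);
		ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
		if (ret)
			return ret;
	} while (attr.sq_draining);
	return 0;
}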
@@ -837,6 +830,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
spin_lock_init(&qp->r_rq.lock);
atomic_set(&qp->refcount, 0);
init_waitqueue_head(&qp->wait);
init_waitqueue_head(&qp->wait_dma);
tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
INIT_LIST_HEAD(&qp->piowait);
INIT_LIST_HEAD(&qp->timerwait);
@@ -930,6 +924,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
else
vfree(qp->r_rq.wq);
ipath_free_qp(&dev->qp_table, qp);
free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_qp:
kfree(qp);
bail_swq:
@@ -951,41 +946,44 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
{
struct ipath_qp *qp = to_iqp(ibqp);
struct ipath_ibdev *dev = to_idev(ibqp->device);
unsigned long flags;

spin_lock_irqsave(&qp->s_lock, flags);
qp->state = IB_QPS_ERR;
spin_unlock_irqrestore(&qp->s_lock, flags);
spin_lock(&dev->n_qps_lock);
dev->n_qps_allocated--;
spin_unlock(&dev->n_qps_lock);
/* Make sure HW and driver activity is stopped. */
spin_lock_irq(&qp->s_lock);
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->timerwait))
list_del_init(&qp->timerwait);
if (!list_empty(&qp->piowait))
list_del_init(&qp->piowait);
spin_unlock(&dev->pending_lock);
qp->s_flags &= ~IPATH_S_ANY_WAIT;
spin_unlock_irq(&qp->s_lock);
/* Stop the sending tasklet */
tasklet_kill(&qp->s_task);
wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
} else
spin_unlock_irq(&qp->s_lock);

/* Stop the sending tasklet. */
tasklet_kill(&qp->s_task);
ipath_free_qp(&dev->qp_table, qp);

if (qp->s_tx) {
atomic_dec(&qp->refcount);
if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
kfree(qp->s_tx->txreq.map_addr);
spin_lock_irq(&dev->pending_lock);
list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
spin_unlock_irq(&dev->pending_lock);
qp->s_tx = NULL;
}

/* Make sure the QP isn't on the timeout list. */
spin_lock_irqsave(&dev->pending_lock, flags);
if (!list_empty(&qp->timerwait))
list_del_init(&qp->timerwait);
if (!list_empty(&qp->piowait))
list_del_init(&qp->piowait);
if (qp->s_tx)
list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
spin_unlock_irqrestore(&dev->pending_lock, flags);
wait_event(qp->wait, !atomic_read(&qp->refcount));

/*
* Make sure that the QP is not in the QPN table so receive
* interrupts will discard packets for this QP. XXX Also remove QP
* from multicast table.
*/
if (atomic_read(&qp->refcount) != 0)
ipath_free_qp(&dev->qp_table, qp);
/* all user's cleaned up, mark it available */
free_qpn(&dev->qp_table, qp->ibqp.qp_num);
spin_lock(&dev->n_qps_lock);
dev->n_qps_allocated--;
spin_unlock(&dev->n_qps_lock);

if (qp->ip)
kref_put(&qp->ip->ref, ipath_release_mmap_info);
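
Note the ordering in the reset and destroy paths above: the QP is pulled off the wait lists and has IPATH_S_ANY_WAIT cleared under qp->s_lock, but the lock is dropped before tasklet_kill() and the wait_event() calls, since both can sleep. Those waits only return once the asynchronous send and receive paths drop their counts and wake the new wait queues, roughly as in this sketch (an assumption about the completion-side pairing, which lives outside this file and is not part of this hunk):

/* Sketch of the wakeup side the wait_event() calls rely on. */
static void sdma_tx_done_sketch(struct ipath_qp *qp)
{
	/* last in-flight SDMA descriptor for this QP completed */
	if (atomic_dec_and_test(&qp->s_dma_busy))
		wake_up(&qp->wait_dma);
}

static void qp_put_sketch(struct ipath_qp *qp)
{
	/* last reference dropped, e.g. by the receive interrupt path */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}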
@@ -1055,9 +1053,10 @@ void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
}

/* Restart sending if it was blocked due to lack of credits. */
if (qp->s_cur != qp->s_head &&
if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
qp->s_cur != qp->s_head &&
(qp->s_lsn == (u32) -1 ||
ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
qp->s_lsn + 1) <= 0))
tasklet_hi_schedule(&qp->s_task);
ipath_schedule_send(qp);
}
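
The restart check above now fires only when the QP is actually blocked on credits (IPATH_S_WAIT_SSN_CREDIT) and hands off to ipath_schedule_send() instead of scheduling the tasklet directly. It still relies on ipath_cmp24() to compare 24-bit serial numbers with wraparound; that helper is not part of this diff, but a sign-correct comparison of 24-bit sequence numbers could be written as in the sketch below (name and implementation are assumptions):

/* Sketch: compare two 24-bit sequence numbers modulo 2^24, returning a
 * negative, zero or positive value like a signed difference. */
static int cmp24_sketch(u32 a, u32 b)
{
	int d = (a - b) & 0xffffff;	/* 24-bit circular distance */

	return d >= 0x800000 ? d - 0x1000000 : d;	/* sign-extend to int */
}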
