Skip to content

Commit

Permalink
IB/qib: Remove driver specific members from qib qp type
Browse files Browse the repository at this point in the history
In preparation for moving the queue pair data structure to rdmavt, the
members of the driver-specific queue pairs which are not common need to be
pushed off to a private driver structure. This structure will be available
in the queue pair once moved to rdmavt as a void pointer. This patch, while
not adding a lot of value in and of itself, is a prerequisite to moving the
queue pair out of the drivers and into rdmavt.

The driver-specific, private queue pair data structure should condense as
more of the send side code moves to rdmavt.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
  • Loading branch information
Dennis Dalessandro authored and Doug Ledford committed Mar 11, 2016
1 parent 869a2a9 commit ffc2690
Show file tree
Hide file tree
Showing 8 changed files with 145 additions and 89 deletions.
76 changes: 46 additions & 30 deletions drivers/infiniband/hw/qib/qib_qp.c
Original file line number Diff line number Diff line change
Expand Up @@ -371,10 +371,11 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
*/
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
struct qib_qp_priv *priv = qp->priv;
qp->remote_qpn = 0;
qp->qkey = 0;
qp->qp_access_flags = 0;
atomic_set(&qp->s_dma_busy, 0);
atomic_set(&priv->s_dma_busy, 0);
qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
qp->s_hdrwords = 0;
qp->s_wqe = NULL;
Expand Down Expand Up @@ -474,6 +475,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
*/
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
int ret = 0;
Expand All @@ -492,9 +494,9 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

spin_lock(&dev->pending_lock);
if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
list_del_init(&qp->iowait);
list_del_init(&priv->iowait);
}
spin_unlock(&dev->pending_lock);

Expand All @@ -504,9 +506,9 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
qib_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
if (qp->s_tx) {
qib_put_txreq(qp->s_tx);
qp->s_tx = NULL;
if (priv->s_tx) {
qib_put_txreq(priv->s_tx);
priv->s_tx = NULL;
}
}

Expand Down Expand Up @@ -572,6 +574,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
{
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_qp *qp = to_iqp(ibqp);
struct qib_qp_priv *priv = qp->priv;
enum ib_qp_state cur_state, new_state;
struct ib_event ev;
int lastwqe = 0;
Expand Down Expand Up @@ -699,19 +702,20 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->iowait))
list_del_init(&qp->iowait);
if (!list_empty(&priv->iowait))
list_del_init(&priv->iowait);
spin_unlock(&dev->pending_lock);
qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
spin_unlock(&qp->s_lock);
spin_unlock_irq(&qp->r_lock);
/* Stop the sending work queue and retry timer */
cancel_work_sync(&qp->s_work);
cancel_work_sync(&priv->s_work);
del_timer_sync(&qp->s_timer);
wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
if (qp->s_tx) {
qib_put_txreq(qp->s_tx);
qp->s_tx = NULL;
wait_event(priv->wait_dma,
!atomic_read(&priv->s_dma_busy));
if (priv->s_tx) {
qib_put_txreq(priv->s_tx);
priv->s_tx = NULL;
}
remove_qp(dev, qp);
wait_event(qp->wait, !atomic_read(&qp->refcount));
Expand Down Expand Up @@ -987,7 +991,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
size_t sg_list_sz;
struct ib_qp *ret;
gfp_t gfp;

struct qib_qp_priv *priv;

if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
Expand Down Expand Up @@ -1055,11 +1059,18 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
goto bail_swq;
}
RCU_INIT_POINTER(qp->next, NULL);
qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
if (!qp->s_hdr) {
priv = kzalloc(sizeof(*priv), gfp);
if (!priv) {
ret = ERR_PTR(-ENOMEM);
goto bail_qp_hdr;
}
priv->owner = qp;
priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
if (!priv->s_hdr) {
ret = ERR_PTR(-ENOMEM);
goto bail_qp;
}
qp->priv = priv;
qp->timeout_jiffies =
usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1000UL);
Expand Down Expand Up @@ -1095,11 +1106,11 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
spin_lock_init(&qp->r_rq.lock);
atomic_set(&qp->refcount, 0);
init_waitqueue_head(&qp->wait);
init_waitqueue_head(&qp->wait_dma);
init_waitqueue_head(&priv->wait_dma);
init_timer(&qp->s_timer);
qp->s_timer.data = (unsigned long)qp;
INIT_WORK(&qp->s_work, qib_do_send);
INIT_LIST_HEAD(&qp->iowait);
INIT_WORK(&priv->s_work, qib_do_send);
INIT_LIST_HEAD(&priv->iowait);
INIT_LIST_HEAD(&qp->rspwait);
qp->state = IB_QPS_RESET;
qp->s_wq = swq;
Expand Down Expand Up @@ -1189,7 +1200,9 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
vfree(qp->r_rq.wq);
free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
kfree(qp->s_hdr);
kfree(priv->s_hdr);
kfree(priv);
bail_qp_hdr:
kfree(qp);
bail_swq:
vfree(swq);
Expand All @@ -1210,23 +1223,24 @@ int qib_destroy_qp(struct ib_qp *ibqp)
{
struct qib_qp *qp = to_iqp(ibqp);
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_qp_priv *priv = qp->priv;

/* Make sure HW and driver activity is stopped. */
spin_lock_irq(&qp->s_lock);
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->iowait))
list_del_init(&qp->iowait);
if (!list_empty(&priv->iowait))
list_del_init(&priv->iowait);
spin_unlock(&dev->pending_lock);
qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
spin_unlock_irq(&qp->s_lock);
cancel_work_sync(&qp->s_work);
cancel_work_sync(&priv->s_work);
del_timer_sync(&qp->s_timer);
wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
if (qp->s_tx) {
qib_put_txreq(qp->s_tx);
qp->s_tx = NULL;
wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
if (priv->s_tx) {
qib_put_txreq(priv->s_tx);
priv->s_tx = NULL;
}
remove_qp(dev, qp);
wait_event(qp->wait, !atomic_read(&qp->refcount));
Expand All @@ -1245,7 +1259,8 @@ int qib_destroy_qp(struct ib_qp *ibqp)
else
vfree(qp->r_rq.wq);
vfree(qp->s_wq);
kfree(qp->s_hdr);
kfree(priv->s_hdr);
kfree(priv);
kfree(qp);
return 0;
}
Expand Down Expand Up @@ -1368,6 +1383,7 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
struct qib_swqe *wqe;
struct qib_qp *qp = iter->qp;
struct qib_qp_priv *priv = qp->priv;

wqe = get_swqe_ptr(qp, qp->s_last);
seq_printf(s,
Expand All @@ -1379,8 +1395,8 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
wqe->wr.opcode,
qp->s_hdrwords,
qp->s_flags,
atomic_read(&qp->s_dma_busy),
!list_empty(&qp->iowait),
atomic_read(&priv->s_dma_busy),
!list_empty(&priv->iowait),
qp->timeout,
wqe->ssn,
qp->s_lsn,
Expand Down
7 changes: 4 additions & 3 deletions drivers/infiniband/hw/qib/qib_rc.c
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
*/
int qib_make_rc_req(struct qib_qp *qp)
{
struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
struct qib_other_headers *ohdr;
struct qib_sge_state *ss;
Expand All @@ -244,9 +245,9 @@ int qib_make_rc_req(struct qib_qp *qp)
int ret = 0;
int delta;

ohdr = &qp->s_hdr->u.oth;
ohdr = &priv->s_hdr->u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
ohdr = &qp->s_hdr->u.l.oth;
ohdr = &priv->s_hdr->u.l.oth;

/*
* The lock is needed to synchronize between the sending tasklet,
Expand All @@ -266,7 +267,7 @@ int qib_make_rc_req(struct qib_qp *qp)
if (qp->s_last == qp->s_head)
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&qp->s_dma_busy)) {
if (atomic_read(&priv->s_dma_busy)) {
qp->s_flags |= QIB_S_WAIT_DMA;
goto bail;
}
Expand Down
18 changes: 11 additions & 7 deletions drivers/infiniband/hw/qib/qib_ruc.c
Original file line number Diff line number Diff line change
Expand Up @@ -675,6 +675,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
u32 bth0, u32 bth2)
{
struct qib_qp_priv *priv = qp->priv;
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
u16 lrh0;
u32 nwords;
Expand All @@ -685,17 +686,18 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
nwords = (qp->s_cur_size + extra_bytes) >> 2;
lrh0 = QIB_LRH_BTH;
if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
&qp->remote_ah_attr.grh,
qp->s_hdrwords, nwords);
lrh0 = QIB_LRH_GRH;
}
lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
qp->remote_ah_attr.sl << 4;
qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
priv->s_hdr->lrh[2] =
cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
qp->remote_ah_attr.src_path_bits);
bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
Expand All @@ -717,7 +719,9 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
*/
void qib_do_send(struct work_struct *work)
{
struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
s_work);
struct qib_qp *qp = priv->owner;
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
int (*make_req)(struct qib_qp *qp);
Expand Down Expand Up @@ -756,7 +760,7 @@ void qib_do_send(struct work_struct *work)
* If the packet cannot be sent now, return and
* the send tasklet will be woken up later.
*/
if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
qp->s_cur_sge, qp->s_cur_size))
break;
/* Record that s_hdr is empty. */
Expand Down
17 changes: 11 additions & 6 deletions drivers/infiniband/hw/qib/qib_sdma.c
Original file line number Diff line number Diff line change
Expand Up @@ -513,7 +513,9 @@ int qib_sdma_running(struct qib_pportdata *ppd)
static void complete_sdma_err_req(struct qib_pportdata *ppd,
struct qib_verbs_txreq *tx)
{
atomic_inc(&tx->qp->s_dma_busy);
struct qib_qp_priv *priv = tx->qp->priv;

atomic_inc(&priv->s_dma_busy);
/* no sdma descriptors, so no unmap_desc */
tx->txreq.start_idx = 0;
tx->txreq.next_descq_idx = 0;
Expand Down Expand Up @@ -543,6 +545,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
u64 sdmadesc[2];
u32 dwoffset;
dma_addr_t addr;
struct qib_qp_priv *priv;

spin_lock_irqsave(&ppd->sdma_lock, flags);

Expand Down Expand Up @@ -644,8 +647,8 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

atomic_inc(&tx->qp->s_dma_busy);
priv = tx->qp->priv;
atomic_inc(&priv->s_dma_busy);
tx->txreq.next_descq_idx = tail;
ppd->dd->f_sdma_update_tail(ppd, tail);
ppd->sdma_descq_added += tx->txreq.sg_count;
Expand All @@ -663,6 +666,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
unmap_desc(ppd, tail);
}
qp = tx->qp;
priv = qp->priv;
qib_put_txreq(tx);
spin_lock(&qp->r_lock);
spin_lock(&qp->s_lock);
Expand All @@ -679,6 +683,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,

busy:
qp = tx->qp;
priv = qp->priv;
spin_lock(&qp->s_lock);
if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
struct qib_ibdev *dev;
Expand All @@ -690,16 +695,16 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
*/
tx->ss = ss;
tx->dwords = dwords;
qp->s_tx = tx;
priv->s_tx = tx;
dev = &ppd->dd->verbs_dev;
spin_lock(&dev->pending_lock);
if (list_empty(&qp->iowait)) {
if (list_empty(&priv->iowait)) {
struct qib_ibport *ibp;

ibp = &ppd->ibport_data;
ibp->n_dmawait++;
qp->s_flags |= QIB_S_WAIT_DMA_DESC;
list_add_tail(&qp->iowait, &dev->dmawait);
list_add_tail(&priv->iowait, &dev->dmawait);
}
spin_unlock(&dev->pending_lock);
qp->s_flags &= ~QIB_S_BUSY;
Expand Down
7 changes: 4 additions & 3 deletions drivers/infiniband/hw/qib/qib_uc.c
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
*/
int qib_make_uc_req(struct qib_qp *qp)
{
struct qib_qp_priv *priv = qp->priv;
struct qib_other_headers *ohdr;
struct qib_swqe *wqe;
unsigned long flags;
Expand All @@ -63,7 +64,7 @@ int qib_make_uc_req(struct qib_qp *qp)
if (qp->s_last == qp->s_head)
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&qp->s_dma_busy)) {
if (atomic_read(&priv->s_dma_busy)) {
qp->s_flags |= QIB_S_WAIT_DMA;
goto bail;
}
Expand All @@ -72,9 +73,9 @@ int qib_make_uc_req(struct qib_qp *qp)
goto done;
}

ohdr = &qp->s_hdr->u.oth;
ohdr = &priv->s_hdr->u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
ohdr = &qp->s_hdr->u.l.oth;
ohdr = &priv->s_hdr->u.l.oth;

/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
Expand Down
Loading

0 comments on commit ffc2690

Please sign in to comment.