IB/qib: Remove qp and mr functionality from qib
Remove qp and mr support from qib and use rdmavt. These two changes
cannot reasonably be split into separate patches because they depend
on each other in multiple places. This paves the way to remove even
more functions in subsequent patches.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Dennis Dalessandro authored and Doug Ledford committed Mar 11, 2016
1 parent 6a9df40 commit 7c2e11f
Showing 17 changed files with 308 additions and 1,314 deletions.
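
The diff below is largely a mechanical type substitution: each qib-private verbs structure that rdmavt now provides (struct qib_qp, struct qib_mregion, struct qib_sge_state, struct qib_lkey_table) and the QIB_SEGSZ constant are replaced by their rvt_/RVT_ counterparts from the shared include/rdma/ headers. A minimal sketch of the pattern, using toy stand-in definitions rather than the real kernel types:

```c
/* Toy stand-ins for illustration only; the real rvt_qp and rvt_mregion
 * definitions live in the shared include/rdma/ headers. */
struct rvt_qp { unsigned int qpn; };
struct rvt_mregion { unsigned int lkey; };

/*
 * Before this commit a field held a driver-private handle:
 *
 *     struct qib_qp *lookaside_qp;
 *
 * After it, the same field uses the shared rdmavt type. The _sketch
 * suffix marks these as hypothetical illustrations, not driver code.
 */
struct qib_ctxtdata_sketch {
	struct rvt_qp *lookaside_qp;    /* was: struct qib_qp * */
	unsigned int lookaside_qpn;
};

struct qib_verbs_txreq_sketch {
	struct rvt_qp *qp;              /* was: struct qib_qp * */
	struct rvt_mregion *mr;         /* was: struct qib_mregion * */
};
```

Once every driver holds rvt_ types, the qp and mr bookkeeping (allocation, lkey tables, reference counting) can live once in rdmavt rather than being duplicated per driver.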
4 changes: 2 additions & 2 deletions drivers/infiniband/hw/qib/Makefile
@@ -1,8 +1,8 @@
obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o

ib_qib-y := qib_cq.o qib_diag.o qib_driver.o qib_eeprom.o \
-qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
-qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
+qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \
+qib_mad.o qib_mmap.o qib_pcie.o qib_pio_copy.o \
qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
14 changes: 7 additions & 7 deletions drivers/infiniband/hw/qib/qib.h
@@ -231,7 +231,7 @@ struct qib_ctxtdata {
/* ctxt rcvhdrq head offset */
u32 head;
/* lookaside fields */
-struct qib_qp *lookaside_qp;
+struct rvt_qp *lookaside_qp;
u32 lookaside_qpn;
/* QPs waiting for context processing */
struct list_head qp_wait_list;
@@ -241,7 +241,7 @@ struct qib_ctxtdata {
#endif
};

-struct qib_sge_state;
+struct rvt_sge_state;

struct qib_sdma_txreq {
int flags;
@@ -259,14 +259,14 @@ struct qib_sdma_desc {

struct qib_verbs_txreq {
struct qib_sdma_txreq txreq;
-struct qib_qp *qp;
-struct qib_swqe *wqe;
+struct rvt_qp *qp;
+struct rvt_swqe *wqe;
u32 dwords;
u16 hdr_dwords;
u16 hdr_inx;
struct qib_pio_header *align_buf;
-struct qib_mregion *mr;
-struct qib_sge_state *ss;
+struct rvt_mregion *mr;
+struct rvt_sge_state *ss;
};

#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
@@ -1324,7 +1324,7 @@ void __qib_sdma_intr(struct qib_pportdata *);
void qib_sdma_intr(struct qib_pportdata *);
void qib_user_sdma_send_desc(struct qib_pportdata *dd,
struct list_head *pktlist);
-int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
+int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
u32, struct qib_verbs_txreq *);
/* ppd->sdma_lock should be locked before calling this. */
int qib_sdma_make_progress(struct qib_pportdata *dd);
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/qib/qib_cq.c
@@ -466,7 +466,7 @@ int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)

if (cq->ip) {
struct qib_ibdev *dev = to_idev(ibcq->device);
-struct qib_mmap_info *ip = cq->ip;
+struct rvt_mmap_info *ip = cq->ip;

qib_update_mmap_info(dev, ip, sz, wc);

4 changes: 2 additions & 2 deletions drivers/infiniband/hw/qib/qib_driver.c
@@ -322,7 +322,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
struct qib_other_headers *ohdr = NULL;
struct qib_ibport *ibp = &ppd->ibport_data;
-struct qib_qp *qp = NULL;
+struct rvt_qp *qp = NULL;
u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
u16 lid = be16_to_cpu(hdr->lrh[1]);
int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
@@ -472,7 +472,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
int last;
u64 lval;
-struct qib_qp *qp, *nqp;
+struct rvt_qp *qp, *nqp;

l = rcd->head;
rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
184 changes: 15 additions & 169 deletions drivers/infiniband/hw/qib/qib_keys.c
@@ -46,20 +46,20 @@
*
*/

-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
unsigned long flags;
u32 r;
u32 n;
int ret = 0;
struct qib_ibdev *dev = to_idev(mr->pd->device);
-struct qib_lkey_table *rkt = &dev->lk_table;
+struct rvt_lkey_table *rkt = &dev->lk_table;

spin_lock_irqsave(&rkt->lock, flags);

/* special case for dma_mr lkey == 0 */
if (dma_region) {
-struct qib_mregion *tmr;
+struct rvt_mregion *tmr;

tmr = rcu_access_pointer(dev->dma_mr);
if (!tmr) {
@@ -90,8 +90,8 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
* bits are capped in qib_verbs.c to insure enough bits
* for generation number
*/
-mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
-((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
+((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
<< 8);
if (mr->lkey == 0) {
mr->lkey |= 1 << 8;
@@ -114,21 +114,21 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
* qib_free_lkey - free an lkey
* @mr: mr to free from tables
*/
-void qib_free_lkey(struct qib_mregion *mr)
+void qib_free_lkey(struct rvt_mregion *mr)
{
unsigned long flags;
u32 lkey = mr->lkey;
u32 r;
struct qib_ibdev *dev = to_idev(mr->pd->device);
-struct qib_lkey_table *rkt = &dev->lk_table;
+struct rvt_lkey_table *rkt = &dev->lk_table;

spin_lock_irqsave(&rkt->lock, flags);
if (!mr->lkey_published)
goto out;
if (lkey == 0)
RCU_INIT_POINTER(dev->dma_mr, NULL);
else {
-r = lkey >> (32 - ib_qib_lkey_table_size);
+r = lkey >> (32 - ib_rvt_lkey_table_size);
RCU_INIT_POINTER(rkt->table[r], NULL);
}
qib_put_mr(mr);
@@ -137,105 +137,6 @@ void qib_free_lkey(struct qib_mregion *mr)
spin_unlock_irqrestore(&rkt->lock, flags);
}

-/**
-* qib_lkey_ok - check IB SGE for validity and initialize
-* @rkt: table containing lkey to check SGE against
-* @pd: protection domain
-* @isge: outgoing internal SGE
-* @sge: SGE to check
-* @acc: access flags
-*
-* Return 1 if valid and successful, otherwise returns 0.
-*
-* increments the reference count upon success
-*
-* Check the IB SGE for validity and initialize our internal version
-* of it.
-*/
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct rvt_pd *pd,
-struct qib_sge *isge, struct ib_sge *sge, int acc)
-{
-struct qib_mregion *mr;
-unsigned n, m;
-size_t off;
-
-/*
-* We use LKEY == zero for kernel virtual addresses
-* (see qib_get_dma_mr and qib_dma.c).
-*/
-rcu_read_lock();
-if (sge->lkey == 0) {
-struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
-if (pd->user)
-goto bail;
-mr = rcu_dereference(dev->dma_mr);
-if (!mr)
-goto bail;
-if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
-goto bail;
-rcu_read_unlock();
-
-isge->mr = mr;
-isge->vaddr = (void *) sge->addr;
-isge->length = sge->length;
-isge->sge_length = sge->length;
-isge->m = 0;
-isge->n = 0;
-goto ok;
-}
-mr = rcu_dereference(
-rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
-if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
-goto bail;
-
-off = sge->addr - mr->user_base;
-if (unlikely(sge->addr < mr->user_base ||
-off + sge->length > mr->length ||
-(mr->access_flags & acc) != acc))
-goto bail;
-if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
-goto bail;
-rcu_read_unlock();
-
-off += mr->offset;
-if (mr->page_shift) {
-/*
-page sizes are uniform power of 2 so no loop is necessary
-entries_spanned_by_off is the number of times the loop below
-would have executed.
-*/
-size_t entries_spanned_by_off;
-
-entries_spanned_by_off = off >> mr->page_shift;
-off -= (entries_spanned_by_off << mr->page_shift);
-m = entries_spanned_by_off/QIB_SEGSZ;
-n = entries_spanned_by_off%QIB_SEGSZ;
-} else {
-m = 0;
-n = 0;
-while (off >= mr->map[m]->segs[n].length) {
-off -= mr->map[m]->segs[n].length;
-n++;
-if (n >= QIB_SEGSZ) {
-m++;
-n = 0;
-}
-}
-}
-isge->mr = mr;
-isge->vaddr = mr->map[m]->segs[n].vaddr + off;
-isge->length = mr->map[m]->segs[n].length - off;
-isge->sge_length = sge->length;
-isge->m = m;
-isge->n = n;
-ok:
-return 1;
-bail:
-rcu_read_unlock();
-return 0;
-}
-
/**
* qib_rkey_ok - check the IB virtual address, length, and RKEY
* @qp: qp for validation
@@ -249,11 +150,11 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct rvt_pd *pd,
*
* increments the reference count upon success
*/
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc)
{
-struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-struct qib_mregion *mr;
+struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+struct rvt_mregion *mr;
unsigned n, m;
size_t off;

@@ -285,7 +186,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
}

mr = rcu_dereference(
-rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
+rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;

@@ -308,15 +209,15 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,

entries_spanned_by_off = off >> mr->page_shift;
off -= (entries_spanned_by_off << mr->page_shift);
-m = entries_spanned_by_off/QIB_SEGSZ;
-n = entries_spanned_by_off%QIB_SEGSZ;
+m = entries_spanned_by_off / RVT_SEGSZ;
+n = entries_spanned_by_off % RVT_SEGSZ;
} else {
m = 0;
n = 0;
while (off >= mr->map[m]->segs[n].length) {
off -= mr->map[m]->segs[n].length;
n++;
-if (n >= QIB_SEGSZ) {
+if (n >= RVT_SEGSZ) {
m++;
n = 0;
}
@@ -335,58 +236,3 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
return 0;
}

-/*
-* Initialize the memory region specified by the work request.
-*/
-int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr)
-{
-struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
-struct qib_mr *mr = to_imr(wr->mr);
-struct qib_mregion *mrg;
-u32 key = wr->key;
-unsigned i, n, m;
-int ret = -EINVAL;
-unsigned long flags;
-u64 *page_list;
-size_t ps;
-
-spin_lock_irqsave(&rkt->lock, flags);
-if (pd->user || key == 0)
-goto bail;
-
-mrg = rcu_dereference_protected(
-rkt->table[(key >> (32 - ib_qib_lkey_table_size))],
-lockdep_is_held(&rkt->lock));
-if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd))
-goto bail;
-
-if (mr->npages > mrg->max_segs)
-goto bail;
-
-ps = mr->ibmr.page_size;
-if (mr->ibmr.length > ps * mr->npages)
-goto bail;
-
-mrg->user_base = mr->ibmr.iova;
-mrg->iova = mr->ibmr.iova;
-mrg->lkey = key;
-mrg->length = mr->ibmr.length;
-mrg->access_flags = wr->access;
-page_list = mr->pages;
-m = 0;
-n = 0;
-for (i = 0; i < mr->npages; i++) {
-mrg->map[m]->segs[n].vaddr = (void *) page_list[i];
-mrg->map[m]->segs[n].length = ps;
-if (++n == QIB_SEGSZ) {
-m++;
-n = 0;
-}
-}
-
-ret = 0;
-bail:
-spin_unlock_irqrestore(&rkt->lock, flags);
-return ret;
-}
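
Two arithmetic details in the qib_keys.c hunks above are easy to miss: the lkey packs its table slot into the top bits and a generation counter starting at bit 8, so a recycled slot yields a different key and a stale handle fails the mr->lkey != rkey check; and with a uniform power-of-two page size, a byte offset converts to a (map, segment) pair with shifts and divides instead of the fallback loop. A standalone sketch of both, with LKEY_TABLE_BITS and SEGSZ as assumed stand-ins for ib_rvt_lkey_table_size and RVT_SEGSZ:

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define LKEY_TABLE_BITS 16  /* assumed stand-in for ib_rvt_lkey_table_size */
#define SEGSZ           64  /* assumed stand-in for RVT_SEGSZ */

int main(void)
{
	/* lkey layout: table slot in the top bits, generation above bit 8,
	 * mirroring the mr->lkey assignment in qib_alloc_lkey above */
	uint32_t r = 5;    /* table slot chosen by the allocator */
	uint32_t gen = 3;  /* per-slot generation counter */
	uint32_t lkey = (r << (32 - LKEY_TABLE_BITS)) |
			((((1u << (24 - LKEY_TABLE_BITS)) - 1) & gen) << 8);

	/* lookup recovers the slot by shifting the index back out,
	 * as qib_rkey_ok does when indexing rkt->table[] */
	printf("lkey=0x%08x slot=%u\n", (unsigned)lkey,
	       (unsigned)(lkey >> (32 - LKEY_TABLE_BITS)));

	/* uniform page size: locate byte offset `off` without a loop,
	 * as in the page_shift branch of qib_rkey_ok */
	unsigned int page_shift = 12;        /* 4 KiB pages */
	size_t off = 5 * 4096 + 100;         /* 5 full pages + 100 bytes */
	size_t entries = off >> page_shift;  /* pages spanned: 5 */

	off -= entries << page_shift;        /* in-page remainder: 100 */
	printf("m=%zu n=%zu off=%zu\n",
	       entries / SEGSZ, entries % SEGSZ, off);  /* m=0 n=5 off=100 */
	return 0;
}
```

With 16 table bits that leaves 8 generation bits, so a slot can be reused 256 times before a stale key could alias a fresh one; lkey == 0 never reaches the table at all, since the dma_region special case in qib_alloc_lkey reserves it for kernel virtual addresses.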
