RDMA/rxe: Cleanup init_send_wqe
This patch changes the return type of init_send_wqe() in rxe_verbs.c to
void, since the function always returns 0. It also moves the code that
copies inline data into the send wqe out to a new helper,
copy_inline_data_to_wqe().

Link: https://lore.kernel.org/r/20210206002437.2756-1-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
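
For readers who want the extracted helper in isolation: below is a small,
self-contained userspace sketch of the same gather pattern. The struct
names (sge, wqe), the 64-byte buffer size, the helper name
copy_inline_data(), and the demo in main() are simplified stand-ins
invented for illustration; they are not the kernel's struct ib_sge or
struct rxe_send_wqe definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sge {			/* stand-in for struct ib_sge */
	uint64_t addr;		/* source buffer address */
	uint32_t length;	/* bytes to copy from that buffer */
};

struct wqe {			/* stand-in for the inline part of rxe_send_wqe */
	uint8_t inline_data[64];
};

/* Pack each scatter/gather element back-to-back into the wqe's
 * contiguous inline buffer -- the same loop the new helper runs. */
static void copy_inline_data(struct wqe *wqe, const struct sge *sge,
			     int num_sge)
{
	uint8_t *p = wqe->inline_data;
	int i;

	for (i = 0; i < num_sge; i++, sge++) {
		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
		p += sge->length;
	}
}

int main(void)
{
	char a[] = "hello ";
	char b[] = "world";	/* trailing '\0' copied below */
	struct sge sg[2] = {
		{ .addr = (uintptr_t)a, .length = 6 },
		{ .addr = (uintptr_t)b, .length = 6 },
	};
	struct wqe w;

	copy_inline_data(&w, sg, 2);
	printf("%s\n", (char *)w.inline_data);	/* prints "hello world" */
	return 0;
}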
Bob Pearson authored and Jason Gunthorpe committed Feb 9, 2021
1 parent dc78074 commit 086f580
Showing 1 changed file with 19 additions and 23 deletions.
drivers/infiniband/sw/rxe/rxe_verbs.c (19 additions, 23 deletions)
@@ -555,43 +555,44 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 	}
 }
 
-static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+				    const struct ib_send_wr *ibwr)
+{
+	struct ib_sge *sge = ibwr->sg_list;
+	u8 *p = wqe->dma.inline_data;
+	int i;
+
+	for (i = 0; i < ibwr->num_sge; i++, sge++) {
+		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+		p += sge->length;
+	}
+}
+
+static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 			 unsigned int mask, unsigned int length,
 			 struct rxe_send_wqe *wqe)
 {
 	int num_sge = ibwr->num_sge;
-	struct ib_sge *sge;
-	int i;
-	u8 *p;
 
 	init_send_wr(qp, &wqe->wr, ibwr);
 
 	/* local operation */
 	if (unlikely(mask & WR_REG_MASK)) {
 		wqe->mask = mask;
 		wqe->state = wqe_state_posted;
-		return 0;
+		return;
 	}
 
 	if (qp_type(qp) == IB_QPT_UD ||
 	    qp_type(qp) == IB_QPT_SMI ||
 	    qp_type(qp) == IB_QPT_GSI)
 		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
 
-	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
-		p = wqe->dma.inline_data;
-
-		sge = ibwr->sg_list;
-		for (i = 0; i < num_sge; i++, sge++) {
-			memcpy(p, (void *)(uintptr_t)sge->addr,
-			       sge->length);
-
-			p += sge->length;
-		}
-	} else {
+	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+		copy_inline_data_to_wqe(wqe, ibwr);
+	else
 		memcpy(wqe->dma.sge, ibwr->sg_list,
 		       num_sge * sizeof(struct ib_sge));
-	}
 
 	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
 		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
@@ -603,8 +604,6 @@ static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	wqe->dma.sge_offset = 0;
 	wqe->state = wqe_state_posted;
 	wqe->ssn = atomic_add_return(1, &qp->ssn);
-
-	return 0;
 }
 
 static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
@@ -627,10 +626,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	}
 
 	send_wqe = producer_addr(sq->queue);
-
-	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
-	if (unlikely(err))
-		goto err1;
+	init_send_wqe(qp, ibwr, mask, length, send_wqe);
 
 	advance_producer(sq->queue);
 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
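
A note on the second half of the cleanup: once init_send_wqe() cannot
fail, the producer-side critical section in post_one_send() becomes
straight-line code, with no error path to unwind between reserving a
slot and publishing it. A sketch of the resulting call sequence, using
the identifiers visible in the diff (the matching spin_lock_irqsave()
is inferred from the unlock shown above, not part of this hunk; the
elided lines are the request validation that precedes the slot
reservation):

	spin_lock_irqsave(&qp->sq.sq_lock, flags);	/* inferred counterpart */
	...
	send_wqe = producer_addr(sq->queue);		/* reserve the next slot */
	init_send_wqe(qp, ibwr, mask, length, send_wqe); /* fill it in; cannot fail */
	advance_producer(sq->queue);			/* publish the slot */
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);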
