RDMA/rxe: Isolate mr code from atomic_write_reply()
Isolate mr specific code from atomic_write_reply() in rxe_resp.c into
a subroutine rxe_mr_do_atomic_write() in rxe_mr.c.
Check length for atomic write operation.
Make iova_to_vaddr() static.

Link: https://lore.kernel.org/r/20230119235936.19728-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Bob Pearson authored and Jason Gunthorpe committed Jan 26, 2023
1 parent f04d5b3 commit d8bdb0e
Showing 3 changed files with 59 additions and 42 deletions.
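
A note on the "Check length" point in the commit message before the per-file hunks: the new helper asks iova_to_vaddr() for sizeof(value) bytes, so an atomic write whose 8-byte target would run past the end of the registered region is rejected before any memory is touched. The stand-alone sketch below only illustrates that bounds idea; struct region, in_range() and the sample numbers are hypothetical and are not part of this commit or of the rxe driver.

/* Hypothetical user-space sketch of the bounds check implied by passing
 * sizeof(value) to iova_to_vaddr(); illustration only, not driver code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct region {                 /* stand-in for a registered MR's iova range */
        uint64_t iova;          /* first byte covered by the region */
        uint64_t length;        /* number of bytes registered */
};

/* True when [iova, iova + len) lies entirely inside the region. */
static bool in_range(const struct region *r, uint64_t iova, size_t len)
{
        return iova >= r->iova &&
               len <= r->length &&
               iova - r->iova <= r->length - len;      /* overflow-safe */
}

int main(void)
{
        struct region r = { .iova = 0x1000, .length = 0x100 };

        /* Last 8 bytes of the region are in range; one step further is not. */
        printf("%d %d\n",
               in_range(&r, 0x10f8, sizeof(uint64_t)),
               in_range(&r, 0x10fc, sizeof(uint64_t)));
        return 0;
}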
drivers/infiniband/sw/rxe/rxe_loc.h: 2 changes (1 addition, 1 deletion)

@@ -71,9 +71,9 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
 	      void *addr, int length, enum rxe_mr_copy_dir dir);
 int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset);
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
 int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 			u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
 struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
 			 enum rxe_mr_lookup_type type);
 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
drivers/infiniband/sw/rxe/rxe_mr.c: 38 changes (37 additions, 1 deletion)

@@ -297,7 +297,7 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
 	}
 }
 
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
+static void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 {
 	size_t offset;
 	int m, n;

@@ -565,6 +565,42 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 	return 0;
 }
 
+/* only implemented for 64 bit architectures */
+#if defined CONFIG_64BIT
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+	u64 *va;
+
+	/* See IBA oA19-28 */
+	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+		rxe_dbg_mr(mr, "mr not in valid state");
+		return RESPST_ERR_RKEY_VIOLATION;
+	}
+
+	va = iova_to_vaddr(mr, iova, sizeof(value));
+	if (unlikely(!va)) {
+		rxe_dbg_mr(mr, "iova out of range");
+		return RESPST_ERR_RKEY_VIOLATION;
+	}
+
+	/* See IBA A19.4.2 */
+	if (unlikely((uintptr_t)va & 0x7 || iova & 0x7)) {
+		rxe_dbg_mr(mr, "misaligned address");
+		return RESPST_ERR_MISALIGNED_ATOMIC;
+	}
+
+	/* Do atomic write after all prior operations have completed */
+	smp_store_release(va, value);
+
+	return 0;
+}
+#else
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+	return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+#endif
+
 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
 {
 	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
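
For readers without the kernel tree at hand, here is a rough user-space analogue of the alignment check and release-ordered store that rxe_mr_do_atomic_write() performs; it is an illustration under the stated substitutions, not the driver code. atomic_store_explicit() with memory_order_release stands in for the kernel's smp_store_release(), and a plain -1 stands in for the RESPST_* error states.

/* User-space analogue of the aligned, release-ordered 8-byte store above.
 * NOT kernel code: C11 atomics replace smp_store_release(), and -1 replaces
 * the RESPST_* responder states. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static int do_atomic_write(_Atomic uint64_t *va, uint64_t iova, uint64_t value)
{
        /* Both the mapped address and the requester's iova must be 8-byte
         * aligned (see the IBA A19.4.2 reference in the hunk above). */
        if (((uintptr_t)va & 0x7) || (iova & 0x7))
                return -1;

        /* Release ordering: earlier stores become visible before the payload. */
        atomic_store_explicit(va, value, memory_order_release);
        return 0;
}

int main(void)
{
        _Atomic uint64_t target = 0;

        if (do_atomic_write(&target, 0x2000, 0x1122334455667788ULL) == 0)
                printf("stored %#llx\n", (unsigned long long)target);
        return 0;
}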
drivers/infiniband/sw/rxe/rxe_resp.c: 61 changes (21 additions, 40 deletions)

@@ -723,30 +723,32 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	return RESPST_ACKNOWLEDGE;
 }
 
-#ifdef CONFIG_64BIT
-static enum resp_states do_atomic_write(struct rxe_qp *qp,
-					struct rxe_pkt_info *pkt)
+static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+					   struct rxe_pkt_info *pkt)
 {
-	struct rxe_mr *mr = qp->resp.mr;
-	int payload = payload_size(pkt);
-	u64 src, *dst;
-
-	if (mr->state != RXE_MR_STATE_VALID)
-		return RESPST_ERR_RKEY_VIOLATION;
+	struct resp_res *res = qp->resp.res;
+	struct rxe_mr *mr;
+	u64 value;
+	u64 iova;
+	int err;
 
-	memcpy(&src, payload_addr(pkt), payload);
+	if (!res) {
+		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+		qp->resp.res = res;
+	}
 
-	dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
-	/* check vaddr is 8 bytes aligned. */
-	if (!dst || (uintptr_t)dst & 7)
-		return RESPST_ERR_MISALIGNED_ATOMIC;
+	if (res->replay)
+		return RESPST_ACKNOWLEDGE;
 
-	/* Do atomic write after all prior operations have completed */
-	smp_store_release(dst, src);
+	mr = qp->resp.mr;
+	value = *(u64 *)payload_addr(pkt);
+	iova = qp->resp.va + qp->resp.offset;
 
-	/* decrease resp.resid to zero */
-	qp->resp.resid -= sizeof(payload);
+	err = rxe_mr_do_atomic_write(mr, iova, value);
+	if (err)
+		return err;
 
+	qp->resp.resid = 0;
 	qp->resp.msn++;
 
 	/* next expected psn, read handles this separately */

@@ -755,29 +757,8 @@ static enum resp_states do_atomic_write(struct rxe_qp *qp,
 
 	qp->resp.opcode = pkt->opcode;
 	qp->resp.status = IB_WC_SUCCESS;
-	return RESPST_ACKNOWLEDGE;
-}
-#else
-static enum resp_states do_atomic_write(struct rxe_qp *qp,
-					struct rxe_pkt_info *pkt)
-{
-	return RESPST_ERR_UNSUPPORTED_OPCODE;
-}
-#endif /* CONFIG_64BIT */
-
-static enum resp_states atomic_write_reply(struct rxe_qp *qp,
-					    struct rxe_pkt_info *pkt)
-{
-	struct resp_res *res = qp->resp.res;
-
-	if (!res) {
-		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
-		qp->resp.res = res;
-	}
-
-	if (res->replay)
-		return RESPST_ACKNOWLEDGE;
-	return do_atomic_write(qp, pkt);
+	return RESPST_ACKNOWLEDGE;
 }
 
 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
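
One design note on the hunk above, illustrated with a small stand-alone sketch: the helper returns either 0 or a responder-state value, so atomic_write_reply() can hand a non-zero result straight back to the responder state machine with a plain "if (err) return err;". The enum values and function names below are invented for the illustration and do not match the driver's definitions.

/* Hypothetical sketch of the "error code doubles as next state" pattern;
 * names and values are made up and are not the driver's RESPST_* enum. */
#include <stdio.h>

enum resp_states {
        RESPST_ACKNOWLEDGE = 1,
        RESPST_ERR_RKEY_VIOLATION,
};

/* Stand-in for rxe_mr_do_atomic_write(): 0 on success, a state on error. */
static int mr_helper(int fail)
{
        return fail ? RESPST_ERR_RKEY_VIOLATION : 0;
}

static enum resp_states write_reply(int fail)
{
        int err = mr_helper(fail);

        if (err)
                return err;             /* error state propagates unchanged */

        /* ... success-path bookkeeping would go here ... */
        return RESPST_ACKNOWLEDGE;
}

int main(void)
{
        printf("success -> %d, failure -> %d\n", write_reply(0), write_reply(1));
        return 0;
}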
