RDMA/bnxt_re: Add memory barriers when processing CQ/EQ entries
The code checks whether the next ring entry is valid before proceeding
to read the rest of the entry. The CPU can reorder the loads and read
the rest of the entry first, possibly picking up a stale entry if the
DMA of a new entry lands right after the validity check. Add a
dma_rmb() after each validity check so that the rest of the entry is
read only after the valid bit has been observed.

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Somnath Kotur authored and Doug Ledford committed Nov 13, 2017
1 parent 685894d commit 9b40183
Showing 2 changed files with 25 additions and 0 deletions.
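
To illustrate the ordering requirement described in the commit message, here is a minimal, self-contained sketch of a consumer polling a DMA'd ring in the same way: the valid/toggle bit is tested first, dma_rmb() is issued, and only then are the remaining fields loaded. The struct layout, field names and helpers (my_ring_entry, my_entry_valid, my_poll_ring) are hypothetical, not the actual bnxt_re definitions; this is not the committed code itself.

#include <linux/types.h>        /* u8/u32/__le16/__le32, bool */
#include <asm/byteorder.h>      /* le16_to_cpu()/le32_to_cpu() */
#include <asm/barrier.h>        /* dma_rmb() */

/* Hypothetical ring entry; the device writes the valid/toggle bit last. */
struct my_ring_entry {
	__le16	info_type;
	u8	flags;
	u8	valid_toggle;
	__le32	payload;
};

/* Valid bit must match the expected toggle for the current pass of the ring. */
static bool my_entry_valid(const struct my_ring_entry *e,
			   u32 raw_cons, u32 ring_size)
{
	bool expect = !((raw_cons / ring_size) & 1);

	return !!(e->valid_toggle & 1) == expect;
}

static void my_poll_ring(struct my_ring_entry *ring,
			 u32 *raw_cons, u32 ring_size)
{
	while (true) {
		struct my_ring_entry *e = &ring[*raw_cons % ring_size];

		if (!my_entry_valid(e, *raw_cons, ring_size))
			break;

		/*
		 * Without this barrier the CPU may load info_type/payload
		 * before the valid bit, and could observe a stale entry if
		 * the device DMAs a new one in the meantime.
		 */
		dma_rmb();

		/* Safe to read the rest of the entry now. */
		(void)le16_to_cpu(e->info_type);
		(void)le32_to_cpu(e->payload);

		(*raw_cons)++;
	}
}

The diff below applies exactly this check-then-barrier-then-read ordering at each ring consumer in the driver.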
21 changes: 21 additions & 0 deletions drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -292,6 +292,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
break;

/*
* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();

type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
switch (type) {
case NQ_BASE_TYPE_CQ_NOTIFICATION:
@@ -1113,6 +1119,11 @@ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
continue;
/*
* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
case CQ_BASE_CQE_TYPE_REQ:
case CQ_BASE_CQE_TYPE_TERMINAL:
@@ -1896,6 +1907,11 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
/* If the next hwcqe is VALID */
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
cq->hwq.max_elements)) {
/*
* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
/* If the next hwcqe is a REQ */
if ((peek_hwcqe->cqe_type_toggle &
CQ_BASE_CQE_TYPE_MASK) ==
@@ -2440,6 +2456,11 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
break;

/*
* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
/* From the device's respective CQE format to qplib_wc*/
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
case CQ_BASE_CQE_TYPE_REQ:
4 changes: 4 additions & 0 deletions drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -359,6 +359,10 @@ static void bnxt_qplib_service_creq(unsigned long data)
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
break;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();

type = creqe->type & CREQ_BASE_TYPE_MASK;
switch (type) {
