RDMA/hns: Support configuring doorbell mode of RQ and CQ
HIP08 supports both normal and record doorbell modes for RQ and CQ, and SQ
record doorbell for userspace is also supported by software for the
flush-CQE process. Now that the capabilities of HIP08 are exposed to the
user and are configurable, support for normal doorbell should be added
back.
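
For context, the two modes differ only in how the new producer index
reaches the hardware. A minimal sketch of the RQ case, lifted from the
update_rq_db() helper added in the diff below (locking and the error-state
path elided):

	/* Record doorbell: publish the head pointer in a shared page
	 * that the device reads, so no MMIO write is needed.
	 */
	*qp->rdb.db_record = qp->rq.head & V2_DB_PARAMETER_IDX_M;

	/* Normal doorbell: build the doorbell payload and write it to
	 * the device's doorbell register over MMIO.
	 */
	struct hns_roce_v2_db rq_db = {};

	roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
		       V2_DB_BYTE_4_TAG_S, qp->qpn);
	roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
		       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
	roce_set_field(rq_db.parameter, V2_DB_PARAMETER_IDX_M,
		       V2_DB_PARAMETER_IDX_S, qp->rq.head);

	hns_roce_write64_k((__le32 *)&rq_db, qp->rq.db_reg_l);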

Note that when normal doorbell is used, the kernel will report "flush
cqe is unsupported" if a QP is modified to the error state, because the
flush is based on record doorbell.
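
The flush workaround depends on that record page: when a QP enters the
error state, the driver defers a flush work item that republishes the
producer index through *qp->rdb.db_record (see update_rq_db() below). A
hedged sketch of why normal doorbell mode cannot flush; the guard shown
here is illustrative, not the exact check the driver performs:

	if (unlikely(qp->state == IB_QPS_ERR)) {
		/* Illustrative guard: without a record doorbell page
		 * there is nothing for the flush logic to update, so
		 * the driver can only warn.
		 */
		if (!(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB))
			dev_warn(hr_dev->dev, "flush cqe is unsupported\n");
		else if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
					   &qp->flush_flag))
			init_flush_work(hr_dev, qp); /* deferred flush */
	}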

Link: https://lore.kernel.org/r/1616840738-7866-2-git-send-email-liweihang@huawei.com
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Yixian Liu authored and Jason Gunthorpe committed Apr 1, 2021
1 parent 8115f97 commit cf8cd4c
Showing 5 changed files with 66 additions and 40 deletions.
drivers/infiniband/hw/hns/hns_roce_cq.c (1 addition, 1 deletion)
@@ -225,7 +225,7 @@ static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
struct ib_udata *udata, unsigned long addr,
struct hns_roce_ib_create_cq_resp *resp)
{
- bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+ bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
struct hns_roce_ucontext *uctx;
int err;

drivers/infiniband/hw/hns/hns_roce_device.h (2 additions, 2 deletions)
@@ -179,8 +179,8 @@ enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
- HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
- HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
+ HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
+ HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
HNS_ROCE_CAP_FLAG_XRC = BIT(6),
HNS_ROCE_CAP_FLAG_MW = BIT(7),
drivers/infiniband/hw/hns/hns_roce_hw_v2.c (56 additions, 32 deletions)
@@ -638,7 +638,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
* around the mailbox calls. Hence, use the deferred flush for
* now.
*/
- if (qp->state == IB_QPS_ERR) {
+ if (unlikely(qp->state == IB_QPS_ERR)) {
if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
init_flush_work(hr_dev, qp);
} else {
@@ -659,6 +659,40 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
}
}

+ static inline void update_rq_db(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *qp)
+ {
+ /*
+ * Hip08 hardware cannot flush the WQEs in RQ if the QP state
+ * gets into errored mode. Hence, as a workaround to this
+ * hardware limitation, driver needs to assist in flushing. But
+ * the flushing operation uses mailbox to convey the QP state to
+ * the hardware and which can sleep due to the mutex protection
+ * around the mailbox calls. Hence, use the deferred flush for
+ * now.
+ */
+ if (unlikely(qp->state == IB_QPS_ERR)) {
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+ } else {
+ if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
+ *qp->rdb.db_record =
+ qp->rq.head & V2_DB_PARAMETER_IDX_M;
+ } else {
+ struct hns_roce_v2_db rq_db = {};
+
+ roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+ V2_DB_BYTE_4_TAG_S, qp->qpn);
+ roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+ V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
+ roce_set_field(rq_db.parameter, V2_DB_PARAMETER_IDX_M,
+ V2_DB_PARAMETER_IDX_S, qp->rq.head);
+
+ hns_roce_write64_k((__le32 *)&rq_db, qp->rq.db_reg_l);
+ }
+ }
+ }
+
static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
u64 __iomem *dest)
{
@@ -884,22 +918,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
if (likely(nreq)) {
hr_qp->rq.head += nreq;

- /*
- * Hip08 hardware cannot flush the WQEs in RQ if the QP state
- * gets into errored mode. Hence, as a workaround to this
- * hardware limitation, driver needs to assist in flushing. But
- * the flushing operation uses mailbox to convey the QP state to
- * the hardware and which can sleep due to the mutex protection
- * around the mailbox calls. Hence, use the deferred flush for
- * now.
- */
- if (hr_qp->state == IB_QPS_ERR) {
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
- &hr_qp->flush_flag))
- init_flush_work(hr_dev, hr_qp);
- } else {
- *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
- }
+ update_rq_db(hr_dev, hr_qp);
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

@@ -1921,8 +1940,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)

caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
- HNS_ROCE_CAP_FLAG_RECORD_DB |
- HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+ HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
+ HNS_ROCE_CAP_FLAG_QP_RECORD_DB;

caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
@@ -3145,7 +3164,23 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)

static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
{
- *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
+ if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
+ *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
+ } else {
+ struct hns_roce_v2_db cq_db = {};
+
+ roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
+ V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
+ roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
+ V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);
+ roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
+ V2_CQ_DB_PARAMETER_CONS_IDX_S,
+ ci & ((hr_cq->cq_depth << 1) - 1));
+ roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
+ V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
+
+ hns_roce_write64_k((__le32 *)&cq_db, hr_cq->cq_db_l);
+ }
}

static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -4257,17 +4292,6 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);

- roce_set_field(context->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
-
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
-
return 0;
}

@@ -5084,7 +5108,7 @@ static void clear_qp(struct hns_roce_qp *hr_qp)
hr_qp->qpn, ibqp->srq ?
to_hr_srq(ibqp->srq) : NULL);

- if (hr_qp->rq.wqe_cnt)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
*hr_qp->rdb.db_record = 0;

hr_qp->rq.head = 0;
drivers/infiniband/hw/hns/hns_roce_main.c (4 additions, 2 deletions)
@@ -309,7 +309,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
if (ret)
goto error_fail_uar_alloc;

- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
mutex_init(&context->page_mutex);
}
@@ -729,7 +730,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);

- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
+ hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&hr_dev->pgdir_list);
mutex_init(&hr_dev->pgdir_mutex);
}
drivers/infiniband/hw/hns/hns_roce_qp.c (3 additions, 3 deletions)
@@ -785,7 +785,7 @@ static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp_resp *resp,
struct hns_roce_ib_create_qp *ucmd)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
hns_roce_qp_has_sq(init_attr) &&
udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
@@ -796,15 +796,15 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
struct ib_udata *udata,
struct hns_roce_ib_create_qp_resp *resp)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
struct ib_qp_init_attr *init_attr)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
hns_roce_qp_has_rq(init_attr));
}

