RDMA/bnxt_re: Support driver specific data collection using rdma tool
Allow users to dump driver-specific resource details when queried
through the rdma tool. This adds driver data for QP, CQ, MR and
SRQ resources.

Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Link: https://patch.msgid.link/1730428483-17841-2-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Kashyap Desai authored and Leon Romanovsky committed Nov 4, 2024
1 parent 775e6d3 commit 7363eb7
drivers/infiniband/hw/bnxt_re/main.c | 141 +++++++++++++++++++++++++++++++++
1 file changed, 141 insertions(+)
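For reference, the new attributes are meant to be consumed through the rdma tool's driver-details flag. A sketch of the expected usage (the output line is illustrative, not captured from this patch; only the attribute names come from the code below):

$ rdma res show qp -dd
link bnxt_re0/1 lqpn 1 type GSI state RTS sq_max_wqe 128 sq_max_sge 6 rq_max_wqe 128 rq_max_sge 6 timeout 14 ...

CQ, MR and SRQ details are queried the same way, e.g. "rdma res show mr -dd".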
@@ -870,6 +870,139 @@ static const struct attribute_group bnxt_re_dev_attr_group = {
.attrs = bnxt_re_attributes,
};

static int bnxt_re_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
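/*
 * Nest the driver attributes under RDMA_NLDEV_ATTR_DRIVER so the rdma
 * tool reports them as driver-specific data for this MR; the CQ, QP
 * and SRQ handlers below follow the same pattern.
 */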
struct bnxt_qplib_hwq *mr_hwq;
struct nlattr *table_attr;
struct bnxt_re_mr *mr;

table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
return -EMSGSIZE;

mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
mr_hwq = &mr->qplib_mr.hwq;

if (rdma_nl_put_driver_u32(msg, "page_size",
mr_hwq->qe_ppg * mr_hwq->element_size))
goto err;
if (rdma_nl_put_driver_u32(msg, "max_elements", mr_hwq->max_elements))
goto err;
if (rdma_nl_put_driver_u32(msg, "element_size", mr_hwq->element_size))
goto err;
if (rdma_nl_put_driver_u64_hex(msg, "hwq", (unsigned long)mr_hwq))
goto err;
if (rdma_nl_put_driver_u64_hex(msg, "va", mr->qplib_mr.va))
goto err;

nla_nest_end(msg, table_attr);
return 0;

err:
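/* Cancel the nest so no partially built driver table is emitted. */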
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}

static int bnxt_re_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
struct bnxt_qplib_hwq *cq_hwq;
struct nlattr *table_attr;
struct bnxt_re_cq *cq;

cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
cq_hwq = &cq->qplib_cq.hwq;

table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
return -EMSGSIZE;

if (rdma_nl_put_driver_u32(msg, "cq_depth", cq_hwq->depth))
goto err;
if (rdma_nl_put_driver_u32(msg, "max_elements", cq_hwq->max_elements))
goto err;
if (rdma_nl_put_driver_u32(msg, "element_size", cq_hwq->element_size))
goto err;
if (rdma_nl_put_driver_u32(msg, "max_wqe", cq->qplib_cq.max_wqe))
goto err;

nla_nest_end(msg, table_attr);
return 0;

err:
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}

static int bnxt_re_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
struct bnxt_qplib_qp *qplib_qp;
struct nlattr *table_attr;
struct bnxt_re_qp *qp;

table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
return -EMSGSIZE;

qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
qplib_qp = &qp->qplib_qp;

if (rdma_nl_put_driver_u32(msg, "sq_max_wqe", qplib_qp->sq.max_wqe))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_max_sge", qplib_qp->sq.max_sge))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_wqe_size", qplib_qp->sq.wqe_size))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_swq_start", qplib_qp->sq.swq_start))
goto err;
if (rdma_nl_put_driver_u32(msg, "sq_swq_last", qplib_qp->sq.swq_last))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_max_wqe", qplib_qp->rq.max_wqe))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_max_sge", qplib_qp->rq.max_sge))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_wqe_size", qplib_qp->rq.wqe_size))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_swq_start", qplib_qp->rq.swq_start))
goto err;
if (rdma_nl_put_driver_u32(msg, "rq_swq_last", qplib_qp->rq.swq_last))
goto err;
if (rdma_nl_put_driver_u32(msg, "timeout", qplib_qp->timeout))
goto err;

nla_nest_end(msg, table_attr);
return 0;

err:
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}

static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
struct nlattr *table_attr;
struct bnxt_re_srq *srq;

table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
return -EMSGSIZE;

srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
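/*
 * Note: these use the u32_hex variants, so the SRQ values are
 * reported in hexadecimal.
 */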

if (rdma_nl_put_driver_u32_hex(msg, "wqe_size", srq->qplib_srq.wqe_size))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "max_wqe", srq->qplib_srq.max_wqe))
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
goto err;

nla_nest_end(msg, table_attr);
return 0;

err:
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}

static const struct ib_device_ops bnxt_re_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_BNXT_RE,
@@ -928,6 +1061,13 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static const struct ib_device_ops restrack_ops = {
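/* Restrack hooks: let the rdma tool collect the driver data above. */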
.fill_res_cq_entry = bnxt_re_fill_res_cq_entry,
.fill_res_qp_entry = bnxt_re_fill_res_qp_entry,
.fill_res_mr_entry = bnxt_re_fill_res_mr_entry,
.fill_res_srq_entry = bnxt_re_fill_res_srq_entry,
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
struct ib_device *ibdev = &rdev->ibdev;
@@ -949,6 +1089,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->driver_def = bnxt_re_uapi_defs;

ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
ib_set_device_ops(ibdev, &restrack_ops);
ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
if (ret)
return ret;