From d6793ca97b76642b77629dd0783ec64782a50bdb Mon Sep 17 00:00:00 2001
From: Aharon Landau
Date: Tue, 27 Jul 2021 10:16:06 +0300
Subject: [PATCH 1/6] RDMA/mlx5: Delay emptying a cache entry when new MRs
 were recently added to it

Fix a typo that causes a cache entry to shrink immediately after new
MRs are added to it, whenever the entry size exceeds the high limit.
As a result, the cache fails at its purpose of preventing the creation
of new mkeys at runtime by reusing the cached ones.

Fixes: b9358bdbc713 ("RDMA/mlx5: Fix locking in MR cache work queue")
Link: https://lore.kernel.org/r/fcb546986be346684a016f5ca23a0567399145fa.1627370131.git.leonro@nvidia.com
Signed-off-by: Aharon Landau
Reviewed-by: Maor Gottlieb
Signed-off-by: Leon Romanovsky
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/hw/mlx5/mr.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3263851ea5744..3f1c5a4f158bf 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 		 */
 		spin_unlock_irq(&ent->lock);
 		need_delay = need_resched() || someone_adding(cache) ||
-			     time_after(jiffies,
-					READ_ONCE(cache->last_add) + 300 * HZ);
+			     !time_after(jiffies,
+					 READ_ONCE(cache->last_add) + 300 * HZ);
 		spin_lock_irq(&ent->lock);
 		if (ent->disabled)
 			goto out;

From db4657afd10e45855ac1d8437fcc9a86bd3d741d Mon Sep 17 00:00:00 2001
From: Mike Marciniszyn
Date: Thu, 29 Jul 2021 14:26:22 -0400
Subject: [PATCH 2/6] RDMA/cma: Revert INIT-INIT patch

The net/sunrpc/xprtrdma module creates its QP using rdma_create_qp()
and immediately posts receives, implicitly assuming the QP is in the
INIT state and thus valid for ib_post_recv().

The patch noted in Fixes: removed the RESET->INIT modify from
rdma_create_qp(), breaking NFS RDMA for verbs providers that fail
ib_post_recv() on a QP in a bad state. This was proven using kprobes
in rvt_post_recv() and rvt_modify_qp(): the traces showed that
rvt_post_recv() failed before ANY modify-QP call, with the QP still in
the RESET state.

Fix by reverting the patch below.
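For reference, the ULP pattern that the revert keeps working looks
roughly like the sketch below (a simplified illustration in the spirit
of the xprtrdma code, not the literal module source; error handling is
trimmed and the variable names are hypothetical):

	struct ib_recv_wr recv_wr = {};
	const struct ib_recv_wr *bad_wr;
	int ret;

	/* rdma_create_qp() is expected to leave the QP in INIT ... */
	ret = rdma_create_qp(cm_id, pd, &qp_init_attr);
	if (ret)
		return ret;

	/* ... so that posting receives right away is legal. With the
	 * RESET->INIT transition removed, a conforming provider must
	 * reject this ib_post_recv() because the QP is still in RESET.
	 */
	ret = ib_post_recv(cm_id->qp, &recv_wr, &bad_wr);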
Fixes: dc70f7c3ed34 ("RDMA/cma: Remove unnecessary INIT->INIT transition")
Link: https://lore.kernel.org/r/1627583182-81330-1-git-send-email-mike.marciniszyn@cornelisnetworks.com
Cc: Haakon Bugge
Cc: Chuck Lever III
Signed-off-by: Mike Marciniszyn
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/core/cma.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 515a7e95a4211..5d3b8b8d163d6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -926,12 +926,25 @@ static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
 	return ret;
 }
 
+static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+	struct ib_qp_attr qp_attr;
+	int qp_attr_mask, ret;
+
+	qp_attr.qp_state = IB_QPS_INIT;
+	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
+	if (ret)
+		return ret;
+
+	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+}
+
 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 		   struct ib_qp_init_attr *qp_init_attr)
 {
 	struct rdma_id_private *id_priv;
 	struct ib_qp *qp;
-	int ret = 0;
+	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	if (id->device != pd->device) {
@@ -948,6 +961,8 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 
 	if (id->qp_type == IB_QPT_UD)
 		ret = cma_init_ud_qp(id_priv, qp);
+	else
+		ret = cma_init_conn_qp(id_priv, qp);
 	if (ret)
 		goto out_destroy;
 

From e2a05339fa1188b6b37540f4611893ac4c534fa2 Mon Sep 17 00:00:00 2001
From: Bob Pearson
Date: Thu, 29 Jul 2021 17:00:38 -0500
Subject: [PATCH 3/6] RDMA/rxe: Use the correct size of wqe when processing SRQ

The memcpy() that copies a WQE from an SRQ to the QP uses an incorrect
size. The size should have been the size of the rxe_send_wqe struct,
not the size of a pointer to it. As a result, IO operations that use
an SRQ on the responder side will fail.

Fixes: ec0fa2445c18 ("RDMA/rxe: Fix over copying in get_srq_wqe")
Link: https://lore.kernel.org/r/20210729220039.18549-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/sw/rxe/rxe_resp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3743dc39b60c8..360ec67cb9e1e 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -318,7 +318,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
 		pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
 		return RESPST_ERR_MALFORMED_WQE;
 	}
-	size = sizeof(wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
+	size = sizeof(*wqe) + wqe->dma.num_sge*sizeof(struct rxe_sge);
 	memcpy(&qp->resp.srq_wqe, wqe, size);
 
 	qp->resp.wqe = &qp->resp.srq_wqe.wqe;

From ef4b96a5773d7f6568363b3d0c3c3f371fb690bd Mon Sep 17 00:00:00 2001
From: Bob Pearson
Date: Thu, 29 Jul 2021 17:00:39 -0500
Subject: [PATCH 4/6] RDMA/rxe: Restore setting tot_len in the IPv4 header

An earlier patch removed the setting of tot_len in IPv4 headers
because it is also set in ip_local_out(). However, this change
resulted in an incorrect ICRC being computed, because the tot_len
field is not masked out of the ICRC calculation. This patch restores
that line, fixing the bug reported by Zhu Yanjun. The bug affects
anyone using rxe, which is currently broken.
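To see why the ordering matters: rxe computes the ICRC before the
packet ever reaches ip_local_out(), over a scratch copy of the IP
header in which only deliberately masked fields are forced to
all-ones, and tot_len is not one of them. A rough sketch of that idea
(illustrative only, not the literal rxe ICRC code; the exact set of
masked fields shown is an assumption):

	/* Illustrative sketch -- not the literal rxe_icrc_hdr() code. */
	struct iphdr pshdr = *iph;	/* scratch copy fed to the CRC */

	pshdr.ttl = 0xff;		/* masked: mutable in flight */
	pshdr.tos = 0xff;		/* masked (assumed)          */
	pshdr.check = 0xffff;		/* masked: per-hop checksum  */
	/* pshdr.tot_len is NOT masked, so it must already be
	 * htons(skb->len); if it is still 0 here, the receiver (which
	 * sees the real length) computes a different ICRC and drops
	 * the packet.
	 */
	crc = crc32_le(crc, (u8 *)&pshdr, sizeof(pshdr));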
Fixes: 230bb836ee88 ("RDMA/rxe: Fix redundant call to ip_send_check")
Link: https://lore.kernel.org/r/20210729220039.18549-3-rpearsonhpe@gmail.com
Reported-by: Zhu Yanjun
Signed-off-by: Bob Pearson
Reviewed-and-tested-by: Zhu Yanjun
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/sw/rxe/rxe_net.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index dec92928a1cd8..5ac27f28ace10 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -259,6 +259,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
 
 	iph->version = IPVERSION;
 	iph->ihl = sizeof(struct iphdr) >> 2;
+	iph->tot_len = htons(skb->len);
 	iph->frag_off = df;
 	iph->protocol = proto;
 	iph->tos = tos;

From 8b436a99cd708bd158231a0630ffa49b1d6175e4 Mon Sep 17 00:00:00 2001
From: Yangyang Li
Date: Mon, 2 Aug 2021 14:56:14 +0800
Subject: [PATCH 5/6] RDMA/hns: Fix the double unlock problem of poll_sem

If hns_roce_cmd_use_events() fails, the poll_sem has not been taken,
but hns_roce_cmd_use_polling() releases it anyway, causing a double
unlock. This is the static checker warning:

  drivers/infiniband/hw/hns/hns_roce_main.c:926 hns_roce_init()
  error: double unlocked '&hr_dev->cmd.poll_sem' (orig line 879)

Event mode and polling mode are mutually exclusive and their resources
are separate, so there is no need to touch polling-mode resources in
event mode. The initial cmd mode is polling mode, so even if cmd fails
to switch to event mode, it is not necessary to switch back to polling
mode.

Fixes: a389d016c030 ("RDMA/hns: Enable all CMDQ context")
Fixes: 3d50503b3b33 ("RDMA/hns: Optimize cmd init and mode selection for hip08")
Link: https://lore.kernel.org/r/1627887374-20019-1-git-send-email-liangwenpeng@huawei.com
Reported-by: Dan Carpenter
Signed-off-by: Yangyang Li
Signed-off-by: Wenpeng Liang
Reviewed-by: Leon Romanovsky
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/hw/hns/hns_roce_cmd.c  | 7 +++----
 drivers/infiniband/hw/hns/hns_roce_main.c | 4 +---
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 8f68cc3ff193f..84f3f2b5f0976 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 
 	hr_cmd->context = kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context),
 				  GFP_KERNEL);
-	if (!hr_cmd->context)
+	if (!hr_cmd->context) {
+		hr_dev->cmd_mod = 0;
 		return -ENOMEM;
+	}
 
 	for (i = 0; i < hr_cmd->max_cmds; ++i) {
 		hr_cmd->context[i].token = i;
@@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
 	spin_lock_init(&hr_cmd->context_lock);
 
 	hr_cmd->use_events = 1;
-	down(&hr_cmd->poll_sem);
 
 	return 0;
 }
@@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
 
 	kfree(hr_cmd->context);
 	hr_cmd->use_events = 0;
-
-	up(&hr_cmd->poll_sem);
 }
 
 struct hns_roce_cmd_mailbox *
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 078a97193f0ea..cc6eab14a2220 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -873,11 +873,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 
 	if (hr_dev->cmd_mod) {
 		ret = hns_roce_cmd_use_events(hr_dev);
-		if (ret) {
+		if (ret)
 			dev_warn(dev,
 				 "Cmd event mode failed, set back to poll!\n");
-			hns_roce_cmd_use_polling(hr_dev);
-		}
 	}
 
 	ret = hns_roce_init_hem(hr_dev);

From 2638a32348bbb1c384dbbd515fd2b12c155f0188 Mon Sep 17 00:00:00 2001
From: Dakshaja Uppalapati
Date: Thu, 5 Aug 2021 18:13:32 +0530
Subject: [PATCH 6/6] RDMA/iw_cxgb4: Fix refcount underflow while destroying
 CQs

The previous atomic increment/decrement logic expected the count to
reach '0' after the final decrement. Replacing the atomic count with a
refcount does not allow that, as refcount_dec() treats a decrement
from 1 as an underflow and triggers a kernel splat.

Fix the refcount logic by using the usual pattern of decrementing the
refcount and testing whether it hit '0' on the final deref in
c4iw_destroy_cq(), and use wait_for_completion() instead of
wait_event().

Fixes: 7183451f846d ("RDMA/cxgb4: Use refcount_t instead of atomic_t for reference counting")
Link: https://lore.kernel.org/r/1628167412-12114-1-git-send-email-dakshaja@chelsio.com
Signed-off-by: Dakshaja Uppalapati
Reviewed-by: Potnuri Bharat Teja
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/hw/cxgb4/cq.c       | 12 +++++++++---
 drivers/infiniband/hw/cxgb4/ev.c       |  6 ++----
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |  3 ++-
 3 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 6c8c910f4e86d..c7e8d7b3baa18 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	return !err || err == -ENODATA ? npolled : err;
 }
 
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+	if (refcount_dec_and_test(&chp->refcnt))
+		complete(&chp->cq_rel_comp);
+}
+
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 	chp = to_c4iw_cq(ib_cq);
 	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
-	refcount_dec(&chp->refcnt);
-	wait_event(chp->wait, !refcount_read(&chp->refcnt));
+	c4iw_cq_rem_ref(chp);
+	wait_for_completion(&chp->cq_rel_comp);
 
 	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
 					     ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	spin_lock_init(&chp->lock);
 	spin_lock_init(&chp->comp_handler_lock);
 	refcount_set(&chp->refcnt, 1);
-	init_waitqueue_head(&chp->wait);
+	init_completion(&chp->cq_rel_comp);
 	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
 	if (ret)
 		goto err_destroy_cq;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 7798d090888bb..34211a533d5cd 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		break;
 	}
 done:
-	if (refcount_dec_and_test(&chp->refcnt))
-		wake_up(&chp->wait);
+	c4iw_cq_rem_ref(chp);
 	c4iw_qp_rem_ref(&qhp->ibqp);
 out:
 	return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-		if (refcount_dec_and_test(&chp->refcnt))
-			wake_up(&chp->wait);
+		c4iw_cq_rem_ref(chp);
 	} else {
 		pr_debug("unknown cqid 0x%x\n", qid);
 		xa_unlock_irqrestore(&dev->cqs, flag);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 3883af3d2312d..ac5f581aff4ca 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -428,7 +428,7 @@ struct c4iw_cq {
 	spinlock_t lock;
 	spinlock_t comp_handler_lock;
 	refcount_t refcnt;
-	wait_queue_head_t wait;
+	struct completion cq_rel_comp;
 	struct c4iw_wr_wait *wr_waitp;
 };
 
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
 int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
 int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		   struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
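The series thus leaves iw_cxgb4 with the standard refcount-plus-
completion teardown. Condensed from the diff above (a standalone
sketch of the scheme, not the literal driver code):

	/* Every holder drops its reference through one helper; the
	 * final drop fires the completion. refcount_dec_and_test() is
	 * the sanctioned way to observe the 1 -> 0 transition, unlike
	 * a bare refcount_dec(), which splats on it.
	 */
	void c4iw_cq_rem_ref(struct c4iw_cq *chp)
	{
		if (refcount_dec_and_test(&chp->refcnt))
			complete(&chp->cq_rel_comp);
	}

	/* Destroy side: drop the initial reference taken at create
	 * time, then sleep until the last holder has dropped theirs.
	 */
	c4iw_cq_rem_ref(chp);
	wait_for_completion(&chp->cq_rel_comp);

Unlike the old wait_event(chp->wait, !refcount_read(&chp->refcnt)),
this pattern never reads the counter's raw value, which refcount_t
deliberately discourages.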