From b41f7852f3b981df30727f5cd64c1877ceee794e Mon Sep 17 00:00:00 2001
From: Devesh Sharma
Date: Thu, 11 Feb 2016 00:21:52 -0500
Subject: [PATCH 1/3] RDMA/ocrdma: Fix arm logic to align with new cq API

Today, the ocrdma driver defers arming the CQ until poll is called.
This was done to prevent calling poll-cq on an armed CQ. Recently, a
new set of CQ APIs was introduced into the Linux kernel, and its
implementation guarantees that a given CQ is never armed before poll
is called on it. Most kernel ULPs have already moved to this new API
or have code where poll is called before arming the CQ, so the above
workaround in ocrdma is not needed anymore.

This patch removes the additional logic that defers arming until poll
is called and adds a simple scheme where ib_req_notify_cq() actually
arms the CQ.

Signed-off-by: Devesh Sharma
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/ocrdma/ocrdma.h       |  3 ---
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 18 ++++--------------
 2 files changed, 4 insertions(+), 17 deletions(-)

diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 040bb8b5cb15a..12503f15fbd6b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -323,9 +323,6 @@ struct ocrdma_cq {
 	 */
 	u32 max_hw_cqe;
 	bool phase_change;
-	bool deferred_arm, deferred_sol;
-	bool first_arm;
-
 	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
 						   * to cq polling
 						   */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 37620b4baafbd..12420e4ecf3da 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1094,7 +1094,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
 	spin_lock_init(&cq->comp_handler_lock);
 	INIT_LIST_HEAD(&cq->sq_head);
 	INIT_LIST_HEAD(&cq->rq_head);
-	cq->first_arm = true;
 
 	if (ib_ctx) {
 		uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2910,12 +2909,9 @@ static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
 	}
 stop_cqe:
 	cq->getp = cur_getp;
-	if (cq->deferred_arm || polled_hw_cqes) {
-		ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm,
-				  cq->deferred_sol, polled_hw_cqes);
-		cq->deferred_arm = false;
-		cq->deferred_sol = false;
-	}
+
+	if (polled_hw_cqes)
+		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
 
 	return i;
 }
@@ -2999,13 +2995,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
 	if (cq_flags & IB_CQ_SOLICITED)
 		sol_needed = true;
 
-	if (cq->first_arm) {
-		ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
-		cq->first_arm = false;
-	}
-
-	cq->deferred_arm = true;
-	cq->deferred_sol = sol_needed;
+	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 
 	return 0;

From c3c0c83667a9fe78cc28c27ec9e5ace3411ae775 Mon Sep 17 00:00:00 2001
From: Eran Ben Elisha
Date: Thu, 11 Feb 2016 10:24:43 +0200
Subject: [PATCH 2/3] IB/mlx4: Add support for extended counters over RoCE ports

When the IB_PMA_PORT_COUNTERS_EXT attribute is set, we now return
64-bit values for the counters.
Signed-off-by: Eran Ben Elisha
Signed-off-by: Matan Barak
Reviewed-by: Or Gerlitz
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/mad.c | 50 +++++++++++++++++++++++---------
 1 file changed, 37 insertions(+), 13 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 26833bfa639bb..75cbe1edc1e70 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -817,17 +817,39 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
-static void edit_counter(struct mlx4_counter *cnt,
-			 struct ib_pma_portcounters *pma_cnt)
+static void edit_counter(struct mlx4_counter *cnt, void *counters,
+			 __be16 attr_id)
 {
-	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
-			     (be64_to_cpu(cnt->tx_bytes) >> 2));
-	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
-			     (be64_to_cpu(cnt->rx_bytes) >> 2));
-	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
-			     be64_to_cpu(cnt->tx_frames));
-	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
-			     be64_to_cpu(cnt->rx_frames));
+	switch (attr_id) {
+	case IB_PMA_PORT_COUNTERS:
+	{
+		struct ib_pma_portcounters *pma_cnt =
+			(struct ib_pma_portcounters *)counters;
+
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+				     (be64_to_cpu(cnt->tx_bytes) >> 2));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+				     (be64_to_cpu(cnt->rx_bytes) >> 2));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+				     be64_to_cpu(cnt->tx_frames));
+		ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+				     be64_to_cpu(cnt->rx_frames));
+		break;
+	}
+	case IB_PMA_PORT_COUNTERS_EXT:
+	{
+		struct ib_pma_portcounters_ext *pma_cnt_ext =
+			(struct ib_pma_portcounters_ext *)counters;
+
+		pma_cnt_ext->port_xmit_data =
+			cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
+		pma_cnt_ext->port_rcv_data =
+			cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
+		pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
+		pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
+		break;
+	}
+	}
 }
 
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -863,7 +885,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	switch (counter_stats.counter_mode & 0xf) {
 	case 0:
 		edit_counter(&counter_stats,
-			     (void *)(out_mad->data + 40));
+			     (void *)(out_mad->data + 40),
+			     in_mad->mad_hdr.attr_id);
 		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 		break;
 	default:
@@ -894,8 +917,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	 */
 	if (link == IB_LINK_LAYER_INFINIBAND) {
 		if (mlx4_is_slave(dev->dev) &&
-		    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
-		    in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+		    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+		     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT)))
 			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
 						in_grh, in_mad, out_mad);
 

From c2bab619813a525d3f58b5ffbfcdc4edee27e497 Mon Sep 17 00:00:00 2001
From: Eran Ben Elisha
Date: Thu, 11 Feb 2016 10:24:44 +0200
Subject: [PATCH 3/3] IB/mlx4: Add support for the port info class for RoCE ports

Report that the driver supports IB_PMA_CLASS_CAP_EXT_WIDTH in the
response to an IB_MGMT_CLASS_PERF_MGMT MAD with the
IB_PMA_CLASS_PORT_INFO attr id.
Signed-off-by: Eran Ben Elisha
Signed-off-by: Matan Barak
Reviewed-by: Or Gerlitz
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/mad.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 75cbe1edc1e70..d68f506c1922e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -852,6 +852,15 @@ static void edit_counter(struct mlx4_counter *cnt, void *counters,
 	}
 }
 
+static int iboe_process_mad_port_info(void *out_mad)
+{
+	struct ib_class_port_info cpi = {};
+
+	cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+	memcpy(out_mad, &cpi, sizeof(cpi));
+	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 			const struct ib_mad *in_mad, struct ib_mad *out_mad)
@@ -864,6 +873,9 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
 		return -EINVAL;
 
+	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
+		return iboe_process_mad_port_info((void *)(out_mad->data + 40));
+
 	memset(&counter_stats, 0, sizeof(counter_stats));
 	mutex_lock(&dev->counters_table[port_num - 1].mutex);
 	list_for_each_entry(tmp_counter,
@@ -919,7 +931,8 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 		if (mlx4_is_slave(dev->dev) &&
 		    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
 		     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
-		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT)))
+		      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+		      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
 			return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
 						in_grh, in_mad, out_mad);
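
For reference, a minimal sketch of how a PMA consumer might use the capability
bit exposed by patch 3/3: once a ClassPortInfo response advertises
IB_PMA_CLASS_CAP_EXT_WIDTH, the 64-bit PortCountersExt attribute handled in
patch 2/3 can be queried instead of the 32-bit PortCounters attribute. This
sketch is not part of the series; the helper name pma_pick_counters_attr() is
made up, and the constants and types are assumed to come from
include/rdma/ib_pma.h and include/rdma/ib_mad.h.

	/*
	 * Hypothetical helper, not from this series: choose which PMA
	 * counters attribute to query based on the ClassPortInfo
	 * capability mask returned for the port.
	 */
	#include <rdma/ib_mad.h>	/* struct ib_class_port_info */
	#include <rdma/ib_pma.h>	/* IB_PMA_* attribute ids and cap bits */

	static __be16 pma_pick_counters_attr(const struct ib_class_port_info *cpi)
	{
		/* Both operands are big-endian, so the bitwise test is valid. */
		if (cpi->capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
			return IB_PMA_PORT_COUNTERS_EXT;	/* 64-bit counters */

		return IB_PMA_PORT_COUNTERS;			/* 32-bit counters */
	}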