RDMA/mlx5: Refactor affinity related code
Move affinity related code in modify qp into a function. This is a
preparation for the next patch, which extends the affinity calculation
to consider the xmit slave.

Link: https://lore.kernel.org/r/20200430192146.12863-16-maorg@mellanox.com
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Maor Gottlieb authored and Jason Gunthorpe committed May 2, 2020
commit 5163b27 (1 parent: 51aab12)

 drivers/infiniband/hw/mlx5/qp.c | 90 ++++++++++++++++++++++++++-----------------
 1 file changed, 53 insertions(+), 37 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3582,33 +3582,61 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         return 0;
 }
 
-static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
-                                    struct mlx5_ib_pd *pd,
-                                    struct mlx5_ib_qp_base *qp_base,
-                                    u8 port_num, struct ib_udata *udata)
+static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
+                                       struct ib_udata *udata)
 {
         struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
                 udata, struct mlx5_ib_ucontext, ibucontext);
-        unsigned int tx_port_affinity;
+        u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+        atomic_t *tx_port_affinity;
 
-        if (ucontext) {
-                tx_port_affinity = (unsigned int)atomic_add_return(
-                                           1, &ucontext->tx_port_affinity) %
-                                           MLX5_MAX_PORTS +
-                                   1;
+        if (ucontext)
+                tx_port_affinity = &ucontext->tx_port_affinity;
+        else
+                tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
+
+        return (unsigned int)atomic_add_return(1, tx_port_affinity) %
+                MLX5_MAX_PORTS + 1;
+}
+
+static bool qp_supports_affinity(struct ib_qp *qp)
+{
+        struct mlx5_ib_qp *mqp = to_mqp(qp);
+
+        if ((qp->qp_type == IB_QPT_RC) ||
+            (qp->qp_type == IB_QPT_UD &&
+             !(mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)) ||
+            (qp->qp_type == IB_QPT_UC) ||
+            (qp->qp_type == IB_QPT_RAW_PACKET) ||
+            (qp->qp_type == IB_QPT_XRC_INI) ||
+            (qp->qp_type == IB_QPT_XRC_TGT))
+                return true;
+        return false;
+}
+
+static unsigned int get_tx_affinity(struct ib_qp *qp, u8 init,
+                                    struct ib_udata *udata)
+{
+        struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+                udata, struct mlx5_ib_ucontext, ibucontext);
+        struct mlx5_ib_dev *dev = to_mdev(qp->device);
+        struct mlx5_ib_qp *mqp = to_mqp(qp);
+        struct mlx5_ib_qp_base *qp_base;
+        unsigned int tx_affinity;
+
+        if (!(dev->lag_active && init && qp_supports_affinity(qp)))
+                return 0;
+
+        tx_affinity = get_tx_affinity_rr(dev, udata);
+
+        qp_base = &mqp->trans_qp.base;
+        if (ucontext)
                 mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
-                            tx_port_affinity, qp_base->mqp.qpn, ucontext);
-        } else {
-                tx_port_affinity =
-                        (unsigned int)atomic_add_return(
-                                1, &dev->port[port_num].roce.tx_port_affinity) %
-                                MLX5_MAX_PORTS +
-                        1;
+                            tx_affinity, qp_base->mqp.qpn, ucontext);
+        else
                 mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
-                            tx_port_affinity, qp_base->mqp.qpn);
-        }
-
-        return tx_port_affinity;
+                            tx_affinity, qp_base->mqp.qpn);
+        return tx_affinity;
 }
 
 static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
@@ -3718,22 +3746,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                 }
         }
 
-        if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
-                if ((ibqp->qp_type == IB_QPT_RC) ||
-                    (ibqp->qp_type == IB_QPT_UD &&
-                     !(qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)) ||
-                    (ibqp->qp_type == IB_QPT_UC) ||
-                    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
-                    (ibqp->qp_type == IB_QPT_XRC_INI) ||
-                    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
-                        if (dev->lag_active) {
-                                u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
-                                tx_affinity = get_tx_affinity(dev, pd, base, p,
-                                                              udata);
-                                context->flags |= cpu_to_be32(tx_affinity << 24);
-                        }
-                }
-        }
+        tx_affinity = get_tx_affinity(ibqp,
+                                      cur_state == IB_QPS_RESET &&
+                                      new_state == IB_QPS_INIT, udata);
+        context->flags |= cpu_to_be32(tx_affinity << 24);
 
         if (is_sqp(ibqp->qp_type)) {
                 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
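Note on the selection scheme: the refactor keeps the existing round-robin policy in get_tx_affinity_rr(), which bumps an atomic counter (per ucontext, or per native port for kernel QPs) and maps it to a 1-based port index. The sketch below is a minimal standalone userspace illustration of that step, assuming C11 atomics in place of the kernel's atomic_t and a dual-port LAG device (MLX5_MAX_PORTS == 2); the counter, names, and main() are for the demo only, not driver code.

/*
 * Illustrative sketch of the round-robin tx-affinity selection, assuming
 * MLX5_MAX_PORTS == 2 and a C11 atomic counter in place of atomic_t.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MLX5_MAX_PORTS 2                /* dual-port LAG device assumed */

static atomic_uint tx_port_affinity;    /* stands in for the kernel counter */

/* Returns a 1-based port index, cycling 2, 1, 2, 1, ... as QPs are created. */
static unsigned int tx_affinity_rr(atomic_uint *counter)
{
        /* atomic_fetch_add() + 1 mirrors the kernel's atomic_add_return(1, ...) */
        unsigned int next = atomic_fetch_add(counter, 1) + 1;

        return next % MLX5_MAX_PORTS + 1;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("QP %d -> tx affinity port %u\n",
                       i, tx_affinity_rr(&tx_port_affinity));
        return 0;
}

Because the counter is shared and only ever incremented, concurrent QP creations still spread roughly evenly across both ports, which is the property the next patch builds on when it starts taking the xmit slave into account.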
