IB/mlx5: Remove check of FW capabilities in ODP page fault handling
As page fault handling is initiated by the FW, there is no need to check that
ODP supports the operation and transport.

Link: https://lore.kernel.org/r/20190819120815.21225-3-leon@kernel.org
Signed-off-by: Michael Guralnik <michaelgur@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
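
For background, the check being removed implemented a common capability-gating
pattern: an opcode-indexed table of required ODP capability bits, tested
against a per-transport capability mask. Below is a minimal standalone sketch
of that pattern; the ODP_SUPPORT_* bits, opcode names, and check_odp_cap()
helper are illustrative stand-ins for this note, not the driver's actual
definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative capability bits; the driver's real flags are IB_ODP_SUPPORT_*. */
#define ODP_SUPPORT_SEND  (1u << 0)
#define ODP_SUPPORT_WRITE (1u << 1)
#define ODP_SUPPORT_READ  (1u << 2)

enum wqe_opcode { OP_SEND, OP_RDMA_WRITE, OP_RDMA_READ, OP_MAX };

/* Opcode -> required capability bit, mirroring the removed
 * mlx5_ib_odp_opcode_cap[] table. */
static const uint32_t opcode_cap[OP_MAX] = {
        [OP_SEND]       = ODP_SUPPORT_SEND,
        [OP_RDMA_WRITE] = ODP_SUPPORT_WRITE,
        [OP_RDMA_READ]  = ODP_SUPPORT_READ,
};

/* Return 0 if the transport advertises the capability the opcode needs. */
static int check_odp_cap(uint32_t transport_caps, unsigned int opcode)
{
        if (opcode >= OP_MAX || !(transport_caps & opcode_cap[opcode]))
                return -1; /* the removed driver code returned -EFAULT here */
        return 0;
}

int main(void)
{
        uint32_t rc_caps = ODP_SUPPORT_SEND | ODP_SUPPORT_READ;

        printf("SEND:  %d\n", check_odp_cap(rc_caps, OP_SEND));       /* 0: allowed */
        printf("WRITE: %d\n", check_odp_cap(rc_caps, OP_RDMA_WRITE)); /* -1: rejected */
        return 0;
}

The commit's observation is that this gate is redundant on the fault path:
the FW raises page faults only for operations it supports, so the handler
does not need to re-derive support from dev->odp_caps.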
Michael Guralnik authored and Jason Gunthorpe committed Aug 28, 2019
1 parent 00679b6 commit 29af949
1 changed file: drivers/infiniband/hw/mlx5/odp.c (1 addition, 47 deletions)
@@ -990,17 +990,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
         return ret < 0 ? ret : npages;
 }
 
-static const u32 mlx5_ib_odp_opcode_cap[] = {
-        [MLX5_OPCODE_SEND]             = IB_ODP_SUPPORT_SEND,
-        [MLX5_OPCODE_SEND_IMM]         = IB_ODP_SUPPORT_SEND,
-        [MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
-        [MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
-        [MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
-        [MLX5_OPCODE_RDMA_READ]        = IB_ODP_SUPPORT_READ,
-        [MLX5_OPCODE_ATOMIC_CS]        = IB_ODP_SUPPORT_ATOMIC,
-        [MLX5_OPCODE_ATOMIC_FA]        = IB_ODP_SUPPORT_ATOMIC,
-};
-
 /*
  * Parse initiator WQE. Advances the wqe pointer to point at the
  * scatter-gather list, and set wqe_end to the end of the WQE.
@@ -1011,7 +1000,6 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 {
         struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
         u16 wqe_index = pfault->wqe.wqe_index;
-        u32 transport_caps;
         struct mlx5_base_av *av;
         unsigned ds, opcode;
 #if defined(DEBUG)
@@ -1059,29 +1047,8 @@ static int mlx5_ib_mr_initiator_pfault_handler(
         opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
                  MLX5_WQE_CTRL_OPCODE_MASK;
 
-        switch (qp->ibqp.qp_type) {
-        case IB_QPT_XRC_INI:
+        if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
                 *wqe += sizeof(struct mlx5_wqe_xrc_seg);
-                transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
-                break;
-        case IB_QPT_RC:
-                transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
-                break;
-        case IB_QPT_UD:
-                transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
-                break;
-        default:
-                mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
-                            qp->ibqp.qp_type);
-                return -EFAULT;
-        }
-
-        if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
-                     !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
-                mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
-                            opcode);
-                return -EFAULT;
-        }
 
         if (qp->ibqp.qp_type == IB_QPT_UD) {
                 av = *wqe;
@@ -1146,19 +1113,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
                 return -EFAULT;
         }
 
-        switch (qp->ibqp.qp_type) {
-        case IB_QPT_RC:
-                if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
-                      IB_ODP_SUPPORT_RECV))
-                        goto invalid_transport_or_opcode;
-                break;
-        default:
-invalid_transport_or_opcode:
-                mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
-                            qp->ibqp.qp_type);
-                return -EFAULT;
-        }
-
         *wqe_end = wqe + wqe_size;
 
         return 0;
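
Reassembled from the context and added lines above, the initiator-side
transport handling after this patch reduces to a single XRC segment skip
(an excerpt of the resulting odp.c, not a standalone program):

        opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
                 MLX5_WQE_CTRL_OPCODE_MASK;

        if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
                *wqe += sizeof(struct mlx5_wqe_xrc_seg);

Both unsupported-transport error paths and the unsupported-opcode check are
gone; on the responder side, *wqe_end = wqe + wqe_size is now reached
unconditionally.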
