RDMA/mlx5: Lift implicit_mr_alloc() into the two routines that call it
This makes the routines easier to understand, particularly with respect
to the locking requirements of the entire sequence. implicit_mr_alloc()
had a lot of ifs specializing it to each of its callers, and only a very
small amount of code was actually shared.

Following patches will cause the flow in the two functions to diverge
further.

Link: https://lore.kernel.org/r/20191009160934.3143-7-jgg@ziepe.ca
Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Jason Gunthorpe committed Oct 28, 2019
1 parent 3d5f3c5 commit c2edcd6
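
The sketch below is a toy illustration of the refactoring pattern this commit applies, not code from the kernel sources: a helper that branches on a caller-specific flag is lifted into its two callers, so each caller open-codes the straight-line path it actually needs and only the genuinely shared step stays in a helper. All names here (make_obj_before, make_parent, make_child, register_obj) are hypothetical.

/*
 * Toy sketch, assuming nothing about the mlx5 driver itself: "before",
 * one helper takes an is_parent flag and branches on it for each caller;
 * "after", each caller carries its own flow and only register_obj()
 * remains shared.  Compiles and runs as a standalone C program.
 */
#include <stdio.h>

struct obj { int id; };

/* The genuinely shared step stays shared. */
static void register_obj(struct obj *o, int id)
{
        o->id = id;
}

/* Before: one constructor specialized by a flag. */
static void make_obj_before(struct obj *o, int id, int is_parent)
{
        register_obj(o, id);
        if (is_parent)
                printf("parent %d: indirect table setup\n", o->id);
        else
                printf("child %d: page table setup\n", o->id);
}

/* After: each caller open-codes its own straight-line sequence. */
static void make_parent(struct obj *o, int id)
{
        register_obj(o, id);
        printf("parent %d: indirect table setup\n", o->id);
}

static void make_child(struct obj *o, int id)
{
        register_obj(o, id);
        printf("child %d: page table setup\n", o->id);
}

int main(void)
{
        struct obj a, b;

        make_obj_before(&a, 1, 1);      /* old, flag-driven shape */
        make_parent(&a, 1);             /* new, lifted shape */
        make_child(&b, 2);
        return 0;
}

The point of the lifted shape, as in the commit, is that each caller's locking and error-unwind sequence can then be read (and later diverge) without reasoning about a mode flag.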
Showing 1 changed file with 74 additions and 77 deletions.
drivers/infiniband/hw/mlx5/odp.c: 151 changes (74 additions, 77 deletions)
@@ -416,96 +416,66 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
                          wq_num, err);
 }
 
-static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
-                                            struct ib_umem_odp *umem_odp,
-                                            bool ksm, int access_flags)
+static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+                                                unsigned long idx)
 {
-        struct mlx5_ib_dev *dev = to_mdev(pd->device);
+        struct ib_umem_odp *odp;
         struct mlx5_ib_mr *mr;
+        struct mlx5_ib_mr *ret;
         int err;
 
-        mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
-                                            MLX5_IMR_MTT_CACHE_ENTRY);
+        odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
+                                      idx * MLX5_IMR_MTT_SIZE,
+                                      MLX5_IMR_MTT_SIZE);
+        if (IS_ERR(odp))
+                return ERR_CAST(odp);
 
+        ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY);
         if (IS_ERR(mr))
-                return mr;
+                goto out_umem;
 
-        err = xa_reserve(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+        err = xa_reserve(&imr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
                          GFP_KERNEL);
-        if (err)
+        if (err) {
+                ret = ERR_PTR(err);
                 goto out_mr;
-
-        mr->ibmr.pd = pd;
-
-        mr->dev = dev;
-        mr->access_flags = access_flags;
-        mr->mmkey.iova = 0;
-        mr->umem = &umem_odp->umem;
-
-        if (ksm) {
-                err = mlx5_ib_update_xlt(mr, 0,
-                                         mlx5_imr_ksm_entries,
-                                         MLX5_KSM_PAGE_SHIFT,
-                                         MLX5_IB_UPD_XLT_INDIRECT |
-                                         MLX5_IB_UPD_XLT_ZAP |
-                                         MLX5_IB_UPD_XLT_ENABLE);
-
-        } else {
-                err = mlx5_ib_update_xlt(mr, 0,
-                                         MLX5_IMR_MTT_ENTRIES,
-                                         PAGE_SHIFT,
-                                         MLX5_IB_UPD_XLT_ZAP |
-                                         MLX5_IB_UPD_XLT_ENABLE |
-                                         MLX5_IB_UPD_XLT_ATOMIC);
         }
 
-        if (err)
-                goto out_release;
-
+        mr->ibmr.pd = imr->ibmr.pd;
+        mr->access_flags = imr->access_flags;
+        mr->umem = &odp->umem;
         mr->ibmr.lkey = mr->mmkey.key;
         mr->ibmr.rkey = mr->mmkey.key;
+        mr->mmkey.iova = 0;
+        mr->parent = imr;
+        odp->private = mr;
+        INIT_WORK(&odp->work, mr_leaf_free_action);
+
+        err = mlx5_ib_update_xlt(mr, 0,
+                                 MLX5_IMR_MTT_ENTRIES,
+                                 PAGE_SHIFT,
+                                 MLX5_IB_UPD_XLT_ZAP |
+                                 MLX5_IB_UPD_XLT_ENABLE |
+                                 MLX5_IB_UPD_XLT_ATOMIC);
+        if (err) {
+                ret = ERR_PTR(err);
+                goto out_release;
+        }
 
-        mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
-                    mr->mmkey.key, dev->mdev, mr);
+        mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+        xa_store(&imr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+                 &mr->mmkey, GFP_ATOMIC);
 
+        mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr);
         return mr;
 
 out_release:
-        xa_release(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
+        xa_release(&imr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
 out_mr:
-        mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
-        mlx5_mr_cache_free(dev, mr);
-
-        return ERR_PTR(err);
-}
-
-static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
-                                                unsigned long idx)
-{
-        struct ib_umem_odp *odp;
-        struct mlx5_ib_mr *mtt;
-
-        odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
-                                      idx * MLX5_IMR_MTT_SIZE,
-                                      MLX5_IMR_MTT_SIZE);
-        if (IS_ERR(odp))
-                return ERR_CAST(odp);
-
-        mtt = implicit_mr_alloc(imr->ibmr.pd, odp, 0, imr->access_flags);
-        if (IS_ERR(mtt)) {
-                ib_umem_odp_release(odp);
-                return mtt;
-        }
-
-        odp->private = mtt;
-        mtt->umem = &odp->umem;
-        mtt->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
-        mtt->parent = imr;
-        INIT_WORK(&odp->work, mr_leaf_free_action);
-
-        xa_store(&mtt->dev->odp_mkeys, mlx5_base_mkey(mtt->mmkey.key),
-                 &mtt->mmkey, GFP_ATOMIC);
-        return mtt;
+        mlx5_mr_cache_free(imr->dev, mr);
+out_umem:
+        ib_umem_odp_release(odp);
+        return ret;
 }
 
 static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *imr,
@@ -573,27 +543,54 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
                                              struct ib_udata *udata,
                                              int access_flags)
 {
-        struct mlx5_ib_mr *imr;
+        struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
         struct ib_umem_odp *umem_odp;
+        struct mlx5_ib_mr *imr;
+        int err;
 
         umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
         if (IS_ERR(umem_odp))
                 return ERR_CAST(umem_odp);
 
-        imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
+        imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY);
         if (IS_ERR(imr)) {
-                ib_umem_odp_release(umem_odp);
-                return ERR_CAST(imr);
+                err = PTR_ERR(imr);
+                goto out_umem;
         }
 
+        imr->ibmr.pd = &pd->ibpd;
+        imr->access_flags = access_flags;
+        imr->mmkey.iova = 0;
+        imr->umem = &umem_odp->umem;
+        imr->ibmr.lkey = imr->mmkey.key;
+        imr->ibmr.rkey = imr->mmkey.key;
         imr->umem = &umem_odp->umem;
         init_waitqueue_head(&imr->q_leaf_free);
         atomic_set(&imr->num_leaf_free, 0);
         atomic_set(&imr->num_pending_prefetch, 0);
-        xa_store(&imr->dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
-                 &imr->mmkey, GFP_ATOMIC);
 
+        err = mlx5_ib_update_xlt(imr, 0,
+                                 mlx5_imr_ksm_entries,
+                                 MLX5_KSM_PAGE_SHIFT,
+                                 MLX5_IB_UPD_XLT_INDIRECT |
+                                 MLX5_IB_UPD_XLT_ZAP |
+                                 MLX5_IB_UPD_XLT_ENABLE);
+        if (err)
+                goto out_mr;
+
+        err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
+                              &imr->mmkey, GFP_KERNEL));
+        if (err)
+                goto out_mr;
+
+        mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
         return imr;
+out_mr:
+        mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
+        mlx5_mr_cache_free(dev, imr);
+out_umem:
+        ib_umem_odp_release(umem_odp);
+        return ERR_PTR(err);
 }
 
 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
