RDMA/mlx5: Cleanup the synchronize_srcu() from the ODP flow
Clean up the synchronize_srcu() from the ODP flow, as it was found to be a very heavy time consumer during dereg_mr.

For example, de-registration of 10,000 ODP MRs, each with a size of one 2M hugepage, took 19.6 seconds, compared to 172 ms for de-registration of the same number of non-ODP MRs.

The new locking scheme uses the wait_event() mechanism, which follows the use count of the MR, instead of synchronize_srcu().

With this change, the above test takes 95 ms, which is even better than the non-ODP flow.

Once the SRCU usage is fully dropped, a lock is needed to protect the XArray access.

Using the above mechanism, we can also remove the num_deferred_work machinery and follow the use count instead.

Link: https://lore.kernel.org/r/20210202071309.2057998-1-leon@kernel.org
Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Yishai Hadas authored and Jason Gunthorpe committed Feb 9, 2021
1 parent a5887d6 commit db72438
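
As a standalone illustration of the scheme the message describes (an editor's sketch around a hypothetical struct obj, not code from this commit): creation holds the initial use-count reference, each user takes and drops one, and destruction drops the initial reference and sleeps until the count reaches zero.

#include <linux/refcount.h>
#include <linux/wait.h>

struct obj {
        refcount_t usecount;            /* creation holds one reference */
        wait_queue_head_t wait;         /* destroyer sleeps here */
};

static void obj_init(struct obj *o)
{
        refcount_set(&o->usecount, 1);
        init_waitqueue_head(&o->wait);
}

/* each user (e.g. a page-fault handler) pairs a get with this put */
static void obj_put(struct obj *o)
{
        if (refcount_dec_and_test(&o->usecount))
                wake_up(&o->wait);
}

/* destruction: drop the creation reference, then drain all users */
static void obj_destroy_wait(struct obj *o)
{
        obj_put(o);
        wait_event(o->wait, refcount_read(&o->usecount) == 0);
}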
Showing 7 changed files with 127 additions and 175 deletions.
13 changes: 6 additions & 7 deletions drivers/infiniband/hw/mlx5/devx.c
@@ -1310,9 +1310,9 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
 	mkey->size = MLX5_GET64(mkc, mkc, len);
 	mkey->pd = MLX5_GET(mkc, mkc, pd);
 	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+	init_waitqueue_head(&mkey->wait);
 
-	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
-			       GFP_KERNEL));
+	return mlx5r_store_odp_mkey(dev, mkey);
 }
 
 static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1385,16 +1385,15 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
 	int ret;
 
 	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
-	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
+	    xa_erase(&obj->ib_dev->odp_mkeys,
+		     mlx5_base_mkey(obj->devx_mr.mmkey.key)))
 		/*
 		 * The pagefault_single_data_segment() does commands against
 		 * the mmkey, we must wait for that to stop before freeing the
 		 * mkey, as another allocation could get the same mkey #.
 		 */
-		xa_erase(&obj->ib_dev->odp_mkeys,
-			 mlx5_base_mkey(obj->devx_mr.mmkey.key));
-		synchronize_srcu(&dev->odp_srcu);
-	}
+		mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
 
 	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
 		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
5 changes: 0 additions & 5 deletions drivers/infiniband/hw/mlx5/main.c
@@ -3869,7 +3869,6 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 {
 	mlx5_ib_cleanup_multiport_master(dev);
 	WARN_ON(!xa_empty(&dev->odp_mkeys));
-	cleanup_srcu_struct(&dev->odp_srcu);
 	mutex_destroy(&dev->cap_mask_mutex);
 	WARN_ON(!xa_empty(&dev->sig_mrs));
 	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
@@ -3914,10 +3913,6 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 
 	dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
 
-	err = init_srcu_struct(&dev->odp_srcu);
-	if (err)
-		goto err_mp;
-
 	mutex_init(&dev->cap_mask_mutex);
 	INIT_LIST_HEAD(&dev->qp_list);
 	spin_lock_init(&dev->reset_flow_resource_lock);
31 changes: 23 additions & 8 deletions drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -684,11 +684,8 @@ struct mlx5_ib_mr {
 	u64 pi_iova;
 
 	/* For ODP and implicit */
-	atomic_t num_deferred_work;
-	wait_queue_head_t q_deferred_work;
 	struct xarray implicit_children;
 	union {
-		struct rcu_head rcu;
 		struct list_head elm;
 		struct work_struct work;
 	} odp_destroy;
@@ -1068,11 +1065,6 @@ struct mlx5_ib_dev {
 	u64 odp_max_size;
 	struct mlx5_ib_pf_eq odp_pf_eq;
 
-	/*
-	 * Sleepable RCU that prevents destruction of MRs while they are still
-	 * being used by a page fault handler.
-	 */
-	struct srcu_struct odp_srcu;
 	struct xarray odp_mkeys;
 
 	u32 null_mkey;
@@ -1599,6 +1591,29 @@ static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
 	return true;
 }
 
+static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
+				       struct mlx5_core_mkey *mmkey)
+{
+	refcount_set(&mmkey->usecount, 1);
+
+	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
+			       mmkey, GFP_KERNEL));
+}
+
+/* deref an mkey that can participate in ODP flow */
+static inline void mlx5r_deref_odp_mkey(struct mlx5_core_mkey *mmkey)
+{
+	if (refcount_dec_and_test(&mmkey->usecount))
+		wake_up(&mmkey->wait);
+}
+
+/* deref an mkey that can participate in ODP flow and wait for release */
+static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_core_mkey *mmkey)
+{
+	mlx5r_deref_odp_mkey(mmkey);
+	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
+}
+
 int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
 
 static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
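
The lookup side lives in the ODP page-fault path, whose diff did not render on this page. Here is an editor's sketch of how a lookup is expected to pair with the helpers above; odp_mkey_get() is a hypothetical name, and the xa_lock reflects the commit message's note that a lock now protects the XA access.

/*
 * Hypothetical lookup (sketch, not from this diff): take a use-count
 * reference under the XArray lock so the mkey cannot be released while
 * a page-fault handler is still working on it.
 */
static struct mlx5_core_mkey *odp_mkey_get(struct mlx5_ib_dev *dev, u32 key)
{
        struct mlx5_core_mkey *mmkey;

        xa_lock(&dev->odp_mkeys);
        mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
        if (mmkey)
                refcount_inc(&mmkey->usecount);
        xa_unlock(&dev->odp_mkeys);

        return mmkey;   /* caller calls mlx5r_deref_odp_mkey() when done */
}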
26 changes: 9 additions & 17 deletions drivers/infiniband/hw/mlx5/mr.c
@@ -158,6 +158,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 	mr->mmkey.type = MLX5_MKEY_MR;
 	mr->mmkey.key |= mlx5_idx_to_mkey(
 		MLX5_GET(create_mkey_out, mr->out, mkey_index));
+	init_waitqueue_head(&mr->mmkey.wait);
 
 	WRITE_ONCE(dev->cache.last_add, jiffies);
 
@@ -1551,10 +1552,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	odp->private = mr;
-	init_waitqueue_head(&mr->q_deferred_work);
-	atomic_set(&mr->num_deferred_work, 0);
-	err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
-			      &mr->mmkey, GFP_KERNEL));
+	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
 	if (err)
 		goto err_dereg_mr;
 
@@ -1651,10 +1649,7 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
 
 	atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
 	umem_dmabuf->private = mr;
-	init_waitqueue_head(&mr->q_deferred_work);
-	atomic_set(&mr->num_deferred_work, 0);
-	err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
-			      &mr->mmkey, GFP_KERNEL));
+	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
 	if (err)
 		goto err_dereg_mr;
 
@@ -2330,9 +2325,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 	}
 
 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-		err = xa_err(xa_store(&dev->odp_mkeys,
-				      mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
-				      GFP_KERNEL));
+		err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
 		if (err)
 			goto free_mkey;
 	}
@@ -2352,14 +2345,13 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
 	struct mlx5_ib_dev *dev = to_mdev(mw->device);
 	struct mlx5_ib_mw *mmw = to_mmw(mw);
 
-	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-		xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+	    xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
 		/*
-		 * pagefault_single_data_segment() may be accessing mmw under
-		 * SRCU if the user bound an ODP MR to this MW.
+		 * pagefault_single_data_segment() may be accessing mmw
+		 * if the user bound an ODP MR to this MW.
 		 */
-		synchronize_srcu(&dev->odp_srcu);
-	}
+		mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
 
 	return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
 }
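
Both cleanup sites in this commit, devx_obj_cleanup() and mlx5_ib_dealloc_mw(), rely on the same teardown ordering, sketched here with a hypothetical mmkey pointer (an editor's illustration, not a diff hunk): xa_erase() returns the erased entry, so the wait runs only if the mkey was actually published for ODP lookups; once erased, no new lookup can find the mkey, and the wait drains any user that found it earlier.

/* teardown ordering (sketch) */
if (xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key)))
        /* drop the creation reference and wait for in-flight users */
        mlx5r_deref_wait_odp_mkey(mmkey);
/* now safe to destroy; the mkey number may be reallocated */
mlx5_core_destroy_mkey(dev->mdev, mmkey);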
[Diffs for the remaining changed files, including the ODP page-fault code in drivers/infiniband/hw/mlx5/odp.c, did not render on this page.]
