RDMA/mlx5: Support dma-buf based userspace memory region
Implement the new driver method 'reg_user_mr_dmabuf'. Utilize the core
functions to import dma-buf based memory regions and update the mappings.

Add code to handle dma-buf related page faults.

Link: https://lore.kernel.org/r/1608067636-98073-5-git-send-email-jianxin.xiong@intel.com
Signed-off-by: Jianxin Xiong <jianxin.xiong@intel.com>
Reviewed-by: Sean Hefty <sean.hefty@intel.com>
Acked-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Acked-by: Christian Koenig <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Jianxin Xiong authored and Jason Gunthorpe committed Jan 20, 2021
1 parent bfe0cc6 commit 90da7dc
Showing 4 changed files with 216 additions and 12 deletions.
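For context (not part of this commit): a minimal userspace sketch of how an application might exercise the new registration path, assuming an rdma-core build that provides the matching ibv_reg_dmabuf_mr() verb, an already-allocated PD, and a dma-buf file descriptor obtained from some exporter such as a GPU driver. The helper name below is illustrative.

/*
 * Illustrative userspace sketch only -- not part of the commit.
 */
#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>

static struct ibv_mr *register_dmabuf_mr(struct ibv_pd *pd, int dmabuf_fd,
					  size_t length, uint64_t iova)
{
	/*
	 * Register the whole dma-buf starting at offset 0; iova and length
	 * must satisfy the alignment checks performed by the kernel's
	 * UVERBS_METHOD_REG_DMABUF_MR handler.
	 */
	return ibv_reg_dmabuf_mr(pd, 0, length, iova, dmabuf_fd,
				 IBV_ACCESS_LOCAL_WRITE |
				 IBV_ACCESS_REMOTE_READ |
				 IBV_ACCESS_REMOTE_WRITE);
}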
2 changes: 2 additions & 0 deletions drivers/infiniband/hw/mlx5/main.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#include <linux/debugfs.h>
@@ -4068,6 +4069,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
	.query_srq = mlx5_ib_query_srq,
	.query_ucontext = mlx5_ib_query_ucontext,
	.reg_user_mr = mlx5_ib_reg_user_mr,
	.reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
	.req_notify_cq = mlx5_ib_arm_cq,
	.rereg_user_mr = mlx5_ib_rereg_user_mr,
	.resize_cq = mlx5_ib_resize_cq,
18 changes: 18 additions & 0 deletions drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#ifndef MLX5_IB_H
@@ -703,6 +704,12 @@ static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
	       mr->umem->is_odp;
}

static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_dmabuf;
}

struct mlx5_ib_mw {
	struct ib_mw ibmw;
	struct mlx5_core_mkey mmkey;
@@ -1243,6 +1250,10 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags,
@@ -1253,11 +1264,13 @@ int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_fence_dmabuf_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1343,6 +1356,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
@@ -1368,6 +1382,10 @@ static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
119 changes: 111 additions & 8 deletions drivers/infiniband/hw/mlx5/mr.c
@@ -1,5 +1,6 @@
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
@@ -36,6 +37,8 @@
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
@@ -935,6 +938,17 @@ static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
	mr->access_flags = access_flags;
}

static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
						  u64 iova)
{
	/*
	 * The alignment of iova has already been checked upon entering
	 * UVERBS_METHOD_REG_DMABUF_MR
	 */
	umem->iova = iova;
	return PAGE_SIZE;
}

static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct ib_umem *umem, u64 iova,
int access_flags)
@@ -944,7 +958,11 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
	struct mlx5_ib_mr *mr;
	unsigned int page_size;

	page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, 0, iova);
	if (umem->is_dmabuf)
		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
	else
		page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
						     0, iova);
	if (WARN_ON(!page_size))
		return ERR_PTR(-EINVAL);
	ent = mr_cache_ent_from_order(
@@ -980,7 +998,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
	mr->mmkey.size = umem->length;
	mr->mmkey.pd = to_mpd(pd)->pdn;
	mr->page_shift = order_base_2(page_size);
	mr->umem = umem;
	set_mr_fields(dev, mr, umem->length, access_flags);

	return mr;
@@ -1201,8 +1218,10 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,

/*
 * Send the DMA list to the HW for a normal MR using UMR.
 * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
 * flag may be used.
 */
static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct device *ddev = &dev->mdev->pdev->dev;
@@ -1244,6 +1263,10 @@ static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
		cur_mtt->ptag =
			cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				    MLX5_IB_MTT_PRESENT);

		if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
			cur_mtt->ptag = 0;

		cur_mtt++;

	}
@@ -1567,6 +1590,84 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
	return create_real_mr(pd, umem, iova, access_flags);
}

static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
	struct mlx5_ib_mr *mr = umem_dmabuf->private;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
}

static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
	.allow_peer2peer = 1,
	.move_notify = mlx5_ib_dmabuf_invalidate_cb,
};

struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
	    !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev,
		    "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
		    offset, virt_addr, length, fd, access_flags);

	/* dmabuf requires xlt update via umr to work. */
	if (!mlx5_ib_can_load_pas_with_umr(dev, length))
		return ERR_PTR(-EINVAL);

	umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
					 access_flags,
					 &mlx5_ib_dmabuf_attach_ops);
	if (IS_ERR(umem_dmabuf)) {
		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
			    PTR_ERR(umem_dmabuf));
		return ERR_CAST(umem_dmabuf);
	}

	mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
				access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&umem_dmabuf->umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
	umem_dmabuf->private = mr;
	init_waitqueue_head(&mr->q_deferred_work);
	atomic_set(&mr->num_deferred_work, 0);
	err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
			      &mr->mmkey, GFP_KERNEL));
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_dmabuf_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	dereg_mr(dev, mr);
	return ERR_PTR(err);
}

/**
 * mlx5_mr_cache_invalidate - Fence all DMA on the MR
 * @mr: The MR to fence
@@ -1740,8 +1841,8 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			return ERR_PTR(err);
		return NULL;
	}
	/* DM or ODP MR's don't have a umem so we can't re-use it */
	if (!mr->umem || is_odp_mr(mr))
	/* DM or ODP MR's don't have a normal umem so we can't re-use it */
	if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
		goto recreate;

/*
@@ -1760,10 +1861,10 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
	}

	/*
	 * DM doesn't have a PAS list so we can't re-use it, odp does but the
	 * logic around releasing the umem is different
	 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does
	 * but the logic around releasing the umem is different
	 */
	if (!mr->umem || is_odp_mr(mr))
	if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
		goto recreate;

	if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
@@ -1876,6 +1977,8 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
	/* Stop all DMA */
	if (is_odp_mr(mr))
		mlx5_ib_fence_odp_mr(mr);
	else if (is_dmabuf_mr(mr))
		mlx5_ib_fence_dmabuf_mr(mr);
	else
		clean_mr(dev, mr);

(The diff for the fourth changed file, which per the commit message adds the dma-buf page-fault handling, did not load.)
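As a rough stand-in for that missing hunk, here is a minimal sketch of what the dma-buf page-fault path does, pieced together from the helpers declared above in mlx5_ib.h and the core ib_umem_dmabuf functions this series introduces. The function name, the bool parameter, and the body are illustrative only, not the committed code.

/*
 * Illustrative kernel-side sketch only -- the committed implementation
 * lives in the hunk that did not load above.  Assumes the driver context
 * of mlx5 (mlx5_ib.h) plus <linux/dma-resv.h> and <rdma/ib_umem.h>.
 */
static int pagefault_dmabuf_mr_sketch(struct mlx5_ib_mr *mr, bool enable)
{
	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
	unsigned int xlt_flags = enable ? MLX5_IB_UPD_XLT_ENABLE : 0;
	int err;

	/* The exporter's reservation lock serializes against move_notify(). */
	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (!err)
		/* Push the refreshed DMA addresses to the HW via UMR. */
		err = mlx5_ib_update_mr_pas(mr, xlt_flags);
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	return err;
}

This mirrors the invalidate callback shown in mr.c: move_notify zaps the MTTs and unmaps the pages, and the fault path remaps them and reloads the translation table under the same reservation lock.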
