IB/mlx4: Prevent cross page boundary allocation
Prevent a cross page boundary allocation by allocating
a whole new page instead; this is required to comply with
ConnectX-3 HW requirements.

Not doing so might cause an "RDMA read local protection" error.

Fixes: 1b2cd0f ('IB/mlx4: Support the new memory registration API')
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Chuck Lever authored and Doug Ledford committed Jun 23, 2016
1 parent 5b420d9 commit cbc9355
Showing 2 changed files with 18 additions and 18 deletions.
drivers/infiniband/hw/mlx4/mlx4_ib.h (1 addition, 1 deletion)

--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
 	u32			max_pages;
 	struct mlx4_mr		mmr;
 	struct ib_umem		*umem;
-	void			*pages_alloc;
+	size_t			page_map_size;
 };
 
 struct mlx4_ib_mw {
drivers/infiniband/hw/mlx4/mr.c (17 additions, 17 deletions)

--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -277,30 +277,33 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 		      struct mlx4_ib_mr *mr,
 		      int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
 		goto err;
	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }
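
The single-page scheme works because the HCA's page list can never outgrow one page: max_pages is capped at MLX4_MAX_FAST_REG_PAGES, so even after rounding up to MLX4_MR_PAGES_ALIGN the mapped region stays within PAGE_SIZE, and get_zeroed_page() returns memory that is page-aligned by construction. The stand-alone sketch below checks that arithmetic in user space; the constant values (MLX4_MAX_FAST_REG_PAGES = 511, MLX4_MR_PAGES_ALIGN = 0x40, a 4 KiB PAGE_SIZE) are assumptions taken from the mlx4 driver headers, not something this patch introduces.

/*
 * Hypothetical user-space sketch, not kernel code: checks that the
 * rounded-up page-list size used by mlx4_alloc_priv_pages() always fits
 * in the single page returned by get_zeroed_page().  The constants are
 * assumed values from the mlx4 driver headers.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE               4096UL /* assumed 4 KiB pages */
#define MLX4_MAX_FAST_REG_PAGES 511    /* assumed driver limit */
#define MLX4_MR_PAGES_ALIGN     0x40   /* assumed HW alignment */

/* Same rounding the kernel's roundup() macro performs. */
static size_t roundup_to(size_t x, size_t align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	size_t page_map_size = roundup_to(MLX4_MAX_FAST_REG_PAGES * sizeof(uint64_t),
					  MLX4_MR_PAGES_ALIGN);

	/* 511 * 8 = 4088 bytes, rounded up to 64 -> 4096: exactly one page. */
	printf("page_map_size = %zu, fits in one page: %s\n",
	       page_map_size, page_map_size <= PAGE_SIZE ? "yes" : "no");
	return 0;
}

Because the allocation is now both page-sized and page-aligned, the mapped page list can no longer straddle a page boundary, which is what the ConnectX-3 HW requirement amounts to.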
