Skip to content

Commit

Permalink
IB/iser: DMA unmap unaligned for RDMA data before touching it
Browse files Browse the repository at this point in the history
iSER uses the DMA mapping api to map the page holding the
SCSI command data to the HCA DMA address space. When the
command data is not aligned for RDMA, the data is copied
to/from an allocated buffer which in turn is used for
executing this command. The pages associated with the
command must therefore be DMA-unmapped before the CPU
touches (copies to or from) the data.

Signed-off-by: Erez Zilber <erezz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
  • Loading branch information
Erez Zilber authored and Roland Dreier committed Sep 28, 2006
1 parent 87e8df7 commit 74a2078
Show file tree
Hide file tree
Showing 3 changed files with 59 additions and 39 deletions.
7 changes: 7 additions & 0 deletions drivers/infiniband/ulp/iser/iscsi_iser.h
Original file line number Diff line number Diff line change
Expand Up @@ -355,4 +355,11 @@ int iser_post_send(struct iser_desc *tx_desc);

int iser_conn_state_comp(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp);

int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir);

void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
#endif
49 changes: 10 additions & 39 deletions drivers/infiniband/ulp/iser/iser_initiator.c
Original file line number Diff line number Diff line change
Expand Up @@ -66,42 +66,6 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
dto->regd_vector_len++;
}

/* DMA-map the scatterlist of a task's data buffer for the HCA.
 *
 * @iser_ctask: task whose data is being mapped
 * @data:       scatterlist descriptor (buf/size in, dma_nents out)
 * @iser_dir:   logical direction slot (ISER_DIR_IN/OUT) to mark as mapped
 * @dma_dir:    DMA API direction passed to dma_map_sg()
 *
 * Returns 0 on success, -EINVAL if dma_map_sg() maps zero entries.
 */
static int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
{
struct device *dma_device;

/* record the mapping so iser_dma_unmap_task_data() knows to undo it */
iser_ctask->dir[iser_dir] = 1;
dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;

data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
return 0;
}

/* Undo any DMA mappings recorded in iser_ctask->dir[] by
 * iser_dma_map_task_data(): the read side is unmapped with
 * DMA_FROM_DEVICE, the write side with DMA_TO_DEVICE.
 */
static void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
{
struct device *dma_device;
struct iser_data_buf *data;

dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;

if (iser_ctask->dir[ISER_DIR_IN]) {
data = &iser_ctask->data[ISER_DIR_IN];
dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
}

if (iser_ctask->dir[ISER_DIR_OUT]) {
data = &iser_ctask->data[ISER_DIR_OUT];
dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
}
}

/* Register user buffer memory and initialize passive rdma
* dto descriptor. Total data size is stored in
* iser_ctask->data[ISER_DIR_IN].data_len
Expand Down Expand Up @@ -699,14 +663,19 @@ void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
{
int deferred;
int is_rdma_aligned = 1;

/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
*/
if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL)
if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
is_rdma_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL)
}
if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
is_rdma_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
}

if (iser_ctask->dir[ISER_DIR_IN]) {
deferred = iser_regd_buff_release
Expand All @@ -726,7 +695,9 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
}
}

iser_dma_unmap_task_data(iser_ctask);
/* if the data was unaligned, it was already unmapped and then copied */
if (is_rdma_aligned)
iser_dma_unmap_task_data(iser_ctask);
}

void iser_dto_buffs_release(struct iser_dto *dto)
Expand Down
42 changes: 42 additions & 0 deletions drivers/infiniband/ulp/iser/iser_memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -369,6 +369,44 @@ static void iser_page_vec_build(struct iser_data_buf *data,
}
}

/**
 * iser_dma_map_task_data - DMA-map a task's data scatterlist for the HCA
 * @iser_ctask: task whose data buffer is being mapped
 * @data:       scatterlist descriptor; dma_nents is filled in on success
 * @iser_dir:   logical direction slot (ISER_DIR_IN/OUT) flagged as mapped
 * @dma_dir:    direction handed to dma_map_sg()
 *
 * Returns 0 on success, -EINVAL when dma_map_sg() yields no entries.
 */
int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct device *dev =
		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;

	/* flag this direction so the unmap path will undo the mapping */
	iser_ctask->dir[iser_dir] = 1;

	data->dma_nents = dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (!data->dma_nents) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

/**
 * iser_dma_unmap_task_data - release a task's DMA mappings
 * @iser_ctask: task whose mappings (recorded in ->dir[]) are undone
 *
 * Unmaps the read side with DMA_FROM_DEVICE and the write side with
 * DMA_TO_DEVICE, matching what iser_dma_map_task_data() created.
 */
void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
{
	struct device *dev =
		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
	struct iser_data_buf *buf;

	if (iser_ctask->dir[ISER_DIR_IN]) {
		buf = &iser_ctask->data[ISER_DIR_IN];
		dma_unmap_sg(dev, buf->buf, buf->size, DMA_FROM_DEVICE);
	}

	if (iser_ctask->dir[ISER_DIR_OUT]) {
		buf = &iser_ctask->data[ISER_DIR_OUT];
		dma_unmap_sg(dev, buf->buf, buf->size, DMA_TO_DEVICE);
	}
}

/**
* iser_reg_rdma_mem - Registers memory intended for RDMA,
* obtaining rkey and va
Expand All @@ -394,6 +432,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
iser_err("rdma alignment violation %d/%d aligned\n",
aligned_len, mem->size);
iser_data_buf_dump(mem);

/* unmap the command data before accessing it */
iser_dma_unmap_task_data(iser_ctask);

/* allocate copy buf, if we are writing, copy the */
/* unaligned scatterlist, dma map the copy */
if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
Expand Down

0 comments on commit 74a2078

Please sign in to comment.