IB/iser: Use the new verbs DMA mapping functions
Convert iSER to use the new verbs DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Ralph Campbell authored and Roland Dreier committed Dec 12, 2006
1 parent 85507bc · commit 5180311
Showing 2 changed files with 61 additions and 63 deletions.
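The conversion pattern is the same throughout: instead of reaching through ib_device->dma_device and calling the generic DMA API, the driver now hands the struct ib_device itself to the ib_dma_* wrappers, and it stores DMA addresses as u64 rather than dma_addr_t, since a device that interposes its own mapping functions may return values that are not bus addresses. A minimal before/after sketch; the ib_dev, buf, size, and dir names are illustrative, not taken from the patch:

    /* Before: generic DMA API against the underlying struct device */
    dma_addr_t addr = dma_map_single(ib_dev->dma_device, buf, size, dir);
    BUG_ON(dma_mapping_error(addr));          /* 2006-era signature: no dev arg */
    dma_unmap_single(ib_dev->dma_device, addr, size, dir);

    /* After: verbs wrappers keyed on the ib_device, address held as u64 */
    u64 addr2 = ib_dma_map_single(ib_dev, buf, size, dir);
    BUG_ON(ib_dma_mapping_error(ib_dev, addr2));
    ib_dma_unmap_single(ib_dev, addr2, size, dir);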
drivers/infiniband/ulp/iser/iscsi_iser.h (2 changes: 1 addition & 1 deletion)
@@ -182,7 +182,7 @@ struct iser_regd_buf {
 	struct iser_mem_reg     reg;        /* memory registration info        */
 	void                    *virt_addr;
 	struct iser_device      *device;    /* device->device for dma_unmap    */
-	dma_addr_t              dma_addr;   /* if non zero, addr for dma_unmap */
+	u64                     dma_addr;   /* if non zero, addr for dma_unmap */
 	enum dma_data_direction direction;  /* direction for dma_unmap         */
 	unsigned int            data_size;
 	atomic_t                ref_count;  /* refcount, freed when dec to 0   */
drivers/infiniband/ulp/iser/iser_memory.c (122 changes: 60 additions & 62 deletions)
@@ -52,7 +52,7 @@
  */
 int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 
 	if ((atomic_read(&regd_buf->ref_count) == 0) ||
 	    atomic_dec_and_test(&regd_buf->ref_count)) {
@@ -61,8 +61,8 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
 		iser_unreg_mem(&regd_buf->reg);
 
 		if (regd_buf->dma_addr) {
-			dma_device = regd_buf->device->ib_device->dma_device;
-			dma_unmap_single(dma_device,
+			dev = regd_buf->device->ib_device;
+			ib_dma_unmap_single(dev,
					 regd_buf->dma_addr,
					 regd_buf->data_size,
					 regd_buf->direction);
@@ -84,12 +84,12 @@ void iser_reg_single(struct iser_device *device,
 		     struct iser_regd_buf *regd_buf,
 		     enum dma_data_direction direction)
 {
-	dma_addr_t dma_addr;
+	u64 dma_addr;
 
-	dma_addr = dma_map_single(device->ib_device->dma_device,
-				  regd_buf->virt_addr,
-				  regd_buf->data_size, direction);
-	BUG_ON(dma_mapping_error(dma_addr));
+	dma_addr = ib_dma_map_single(device->ib_device,
+				     regd_buf->virt_addr,
+				     regd_buf->data_size, direction);
+	BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
 
 	regd_buf->reg.lkey = device->mr->lkey;
 	regd_buf->reg.len  = regd_buf->data_size;
@@ -107,7 +107,7 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				  enum iser_data_dir cmd_dir)
 {
 	int dma_nents;
-	struct device *dma_device;
+	struct ib_device *dev;
 	char *mem = NULL;
 	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
 	unsigned long  cmd_data_len = data->data_len;
@@ -147,17 +147,12 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 	iser_ctask->data_copy[cmd_dir].copy_buf  = mem;
 
-	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
-	if (cmd_dir == ISER_DIR_OUT)
-		dma_nents = dma_map_sg(dma_device,
-				       &iser_ctask->data_copy[cmd_dir].sg_single,
-				       1, DMA_TO_DEVICE);
-	else
-		dma_nents = dma_map_sg(dma_device,
-				       &iser_ctask->data_copy[cmd_dir].sg_single,
-				       1, DMA_FROM_DEVICE);
-
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	dma_nents = ib_dma_map_sg(dev,
+				  &iser_ctask->data_copy[cmd_dir].sg_single,
+				  1,
+				  (cmd_dir == ISER_DIR_OUT) ?
+				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	BUG_ON(dma_nents == 0);
 
 	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
@@ -170,19 +165,16 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				     enum iser_data_dir         cmd_dir)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 	struct iser_data_buf *mem_copy;
 	unsigned long cmd_data_len;
 
-	dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-	mem_copy   = &iser_ctask->data_copy[cmd_dir];
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+	mem_copy = &iser_ctask->data_copy[cmd_dir];
 
-	if (cmd_dir == ISER_DIR_OUT)
-		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
-			     DMA_TO_DEVICE);
-	else
-		dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
-			     DMA_FROM_DEVICE);
+	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+			(cmd_dir == ISER_DIR_OUT) ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
 	if (cmd_dir == ISER_DIR_IN) {
 		char *mem;
@@ -231,10 +223,11 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
  * consecutive elements. Also, it handles one entry SG.
  */
 static int iser_sg_to_page_vec(struct iser_data_buf *data,
-			       struct iser_page_vec *page_vec)
+			       struct iser_page_vec *page_vec,
+			       struct ib_device *ibdev)
 {
 	struct scatterlist *sg = (struct scatterlist *)data->buf;
-	dma_addr_t first_addr, last_addr, page;
+	u64 first_addr, last_addr, page;
 	int end_aligned;
 	unsigned int cur_page = 0;
 	unsigned long total_sz = 0;
@@ -244,18 +237,21 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
 	page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
 
 	for (i = 0; i < data->dma_nents; i++) {
-		total_sz += sg_dma_len(&sg[i]);
+		unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+
+		total_sz += dma_len;
 
-		first_addr = sg_dma_address(&sg[i]);
-		last_addr  = first_addr + sg_dma_len(&sg[i]);
+		first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+		last_addr  = first_addr + dma_len;
 
 		end_aligned = !(last_addr & ~MASK_4K);
 
 		/* continue to collect page fragments till aligned or SG ends */
 		while (!end_aligned && (i + 1 < data->dma_nents)) {
 			i++;
-			total_sz += sg_dma_len(&sg[i]);
-			last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
+			dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+			total_sz += dma_len;
+			last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
 			end_aligned = !(last_addr & ~MASK_4K);
 		}
 
@@ -287,10 +283,11 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
  * the number of entries which are aligned correctly. Supports the case where
  * consecutive SG elements are actually fragments of the same physcial page.
  */
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
+					      struct ib_device *ibdev)
 {
 	struct scatterlist *sg;
-	dma_addr_t end_addr, next_addr;
+	u64 end_addr, next_addr;
 	int i, cnt;
 	unsigned int ret_len = 0;
 
@@ -302,12 +299,12 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
		   (unsigned long)page_to_phys(sg[i].page),
		   (unsigned long)sg[i].offset,
		   (unsigned long)sg[i].length); */
-		end_addr = sg_dma_address(&sg[i]) +
-			   sg_dma_len(&sg[i]);
+		end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
+			   ib_sg_dma_len(ibdev, &sg[i]);
 		/* iser_dbg("Checking sg iobuf end address "
		   "0x%08lX\n", end_addr); */
 		if (i + 1 < data->dma_nents) {
-			next_addr = sg_dma_address(&sg[i+1]);
+			next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
 			/* are i, i+1 fragments of the same page? */
 			if (end_addr == next_addr)
				continue;
@@ -324,17 +321,18 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
 	return ret_len;
 }
 
-static void iser_data_buf_dump(struct iser_data_buf *data)
+static void iser_data_buf_dump(struct iser_data_buf *data,
+			       struct ib_device *ibdev)
 {
 	struct scatterlist *sg = (struct scatterlist *)data->buf;
 	int i;
 
 	for (i = 0; i < data->dma_nents; i++)
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
-			 i, (unsigned long)sg_dma_address(&sg[i]),
+			 i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
			 sg[i].page, sg[i].offset,
-			 sg[i].length,sg_dma_len(&sg[i]));
+			 sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
 }
 
 static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -348,22 +346,23 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
 }
 
 static void iser_page_vec_build(struct iser_data_buf *data,
-				struct iser_page_vec *page_vec)
+				struct iser_page_vec *page_vec,
+				struct ib_device *ibdev)
 {
 	int page_vec_len = 0;
 
 	page_vec->length = 0;
 	page_vec->offset = 0;
 
 	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
-	page_vec_len = iser_sg_to_page_vec(data,page_vec);
+	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
 	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
 
 	page_vec->length = page_vec_len;
 
 	if (page_vec_len * SIZE_4K < page_vec->data_size) {
 		iser_err("page_vec too short to hold this SG\n");
-		iser_data_buf_dump(data);
+		iser_data_buf_dump(data, ibdev);
 		iser_dump_page_vec(page_vec);
 		BUG();
 	}
@@ -374,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
			    enum iser_data_dir iser_dir,
			    enum dma_data_direction dma_dir)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 
 	iser_ctask->dir[iser_dir] = 1;
-	dma_device =
-		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
 
-	data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
 	if (data->dma_nents == 0) {
 		iser_err("dma_map_sg failed!!!\n");
 		return -EINVAL;
@@ -390,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
 
 void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
 {
-	struct device *dma_device;
+	struct ib_device *dev;
 	struct iser_data_buf *data;
 
-	dma_device =
-		iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
 
 	if (iser_ctask->dir[ISER_DIR_IN]) {
 		data = &iser_ctask->data[ISER_DIR_IN];
-		dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
 	}
 
 	if (iser_ctask->dir[ISER_DIR_OUT]) {
 		data = &iser_ctask->data[ISER_DIR_OUT];
-		dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
 	}
 }
 
@@ -418,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 {
 	struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
 	struct iser_device   *device = ib_conn->device;
+	struct ib_device     *ibdev = device->ib_device;
 	struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
 	struct iser_regd_buf *regd_buf;
 	int aligned_len;
@@ -427,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 
 	regd_buf = &iser_ctask->rdma_regd[cmd_dir];
 
-	aligned_len = iser_data_buf_aligned_len(mem);
+	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
 	if (aligned_len != mem->dma_nents) {
 		iser_err("rdma alignment violation %d/%d aligned\n",
			 aligned_len, mem->size);
-		iser_data_buf_dump(mem);
+		iser_data_buf_dump(mem, ibdev);
 
 		/* unmap the command data before accessing it */
 		iser_dma_unmap_task_data(iser_ctask);
@@ -449,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 
 		regd_buf->reg.lkey = device->mr->lkey;
 		regd_buf->reg.rkey = device->mr->rkey;
-		regd_buf->reg.len  = sg_dma_len(&sg[0]);
-		regd_buf->reg.va   = sg_dma_address(&sg[0]);
+		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
+		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
 		regd_buf->reg.is_fmr = 0;
 
 		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
@@ -460,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
 	} else { /* use FMR for multiple dma entries */
-		iser_page_vec_build(mem, ib_conn->page_vec);
+		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
 		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
 		if (err) {
-			iser_data_buf_dump(mem);
+			iser_data_buf_dump(mem, ibdev);
 			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
 			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
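For reference, the ib_dma_* wrappers this patch switches to (added earlier in the same series, in include/rdma/ib_verbs.h) are thin inlines that let a device supply its own DMA mapping operations and otherwise fall back to the generic DMA API. A sketch of the idea, reconstructed from memory rather than quoted from the kernel source:

    static inline u64 ib_dma_map_single(struct ib_device *dev,
                                        void *cpu_addr, size_t size,
                                        enum dma_data_direction direction)
    {
            /* if the device interposes its own ops, use them ... */
            if (dev->dma_ops)
                    return dev->dma_ops->map_single(dev, cpu_addr, size,
                                                    direction);
            /* ... otherwise fall through to the normal DMA API */
            return dma_map_single(dev->dma_device, cpu_addr, size, direction);
    }

This indirection is what makes the conversion worthwhile for devices such as ipath, whose "DMA" addresses are not ordinary bus addresses.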
