---
yaml
---
r: 177110
b: refs/heads/master
c: 55464d4
h: refs/heads/master
v: v3
Bart Van Assche authored and Roland Dreier committed Dec 9, 2009
1 parent 26ef537 commit 2fa4d0b
Showing 3 changed files with 72 additions and 57 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c1ccaf2478f84c2665cf57f981db143aa582d646
+refs/heads/master: 55464d461bdcffc4422aebfb750eacf99e3c0f27
122 changes: 66 additions & 56 deletions trunk/drivers/infiniband/ulp/iser/iser_memory.c
@@ -209,8 +209,6 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 	mem_copy->copy_buf = NULL;
 }
 
-#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
-
 /**
  * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
  * and returns the length of resulting physical address array (may be less than
@@ -223,93 +221,105 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
  * where --few fragments of the same page-- are present in the SG as
  * consecutive elements. Also, it handles one entry SG.
  */
 
 static int iser_sg_to_page_vec(struct iser_data_buf *data,
 			       struct iser_page_vec *page_vec,
 			       struct ib_device *ibdev)
 {
-	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
-	u64 start_addr, end_addr, page, chunk_start = 0;
+	struct scatterlist *sgl = (struct scatterlist *)data->buf;
+	struct scatterlist *sg;
+	u64 first_addr, last_addr, page;
+	int end_aligned;
+	unsigned int cur_page = 0;
 	unsigned long total_sz = 0;
-	unsigned int dma_len;
-	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
+	int i;
 
 	/* compute the offset of first element */
 	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
 
-	new_chunk = 1;
-	cur_page = 0;
 	for_each_sg(sgl, sg, data->dma_nents, i) {
-		start_addr = ib_sg_dma_address(ibdev, sg);
-		if (new_chunk)
-			chunk_start = start_addr;
-		dma_len = ib_sg_dma_len(ibdev, sg);
-		end_addr = start_addr + dma_len;
+		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
+
 		total_sz += dma_len;
 
-		/* collect page fragments until aligned or end of SG list */
-		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
-			new_chunk = 0;
-			continue;
+		first_addr = ib_sg_dma_address(ibdev, sg);
+		last_addr = first_addr + dma_len;
+
+		end_aligned = !(last_addr & ~MASK_4K);
+
+		/* continue to collect page fragments till aligned or SG ends */
+		while (!end_aligned && (i + 1 < data->dma_nents)) {
+			sg = sg_next(sg);
+			i++;
+			dma_len = ib_sg_dma_len(ibdev, sg);
+			total_sz += dma_len;
+			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
+			end_aligned = !(last_addr & ~MASK_4K);
 		}
-		new_chunk = 1;
-
-		/* address of the first page in the contiguous chunk;
-		   masking relevant for the very first SG entry,
-		   which might be unaligned */
-		page = chunk_start & MASK_4K;
-		do {
-			page_vec->pages[cur_page++] = page;
+
+		/* handle the 1st page in the 1st DMA element */
+		if (cur_page == 0) {
+			page = first_addr & MASK_4K;
+			page_vec->pages[cur_page] = page;
+			cur_page++;
 			page += SIZE_4K;
-		} while (page < end_addr);
-	}
+		} else
+			page = first_addr;
+
+		for (; page < last_addr; page += SIZE_4K) {
+			page_vec->pages[cur_page] = page;
+			cur_page++;
+		}
 
+	}
 	page_vec->data_size = total_sz;
 	iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
 	return cur_page;
 }
 
+#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
+
 /**
  * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
  * for RDMA sub-list of a scatter-gather list of memory buffers, and returns
  * the number of entries which are aligned correctly. Supports the case where
  * consecutive SG elements are actually fragments of the same physcial page.
  */
-static int iser_data_buf_aligned_len(struct iser_data_buf *data,
-				     struct ib_device *ibdev)
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
+					      struct ib_device *ibdev)
 {
-	struct scatterlist *sgl, *sg, *next_sg = NULL;
-	u64 start_addr, end_addr;
-	int i, ret_len, start_check = 0;
-
-	if (data->dma_nents == 1)
-		return 1;
+	struct scatterlist *sgl, *sg;
+	u64 end_addr, next_addr;
+	int i, cnt;
+	unsigned int ret_len = 0;
 
 	sgl = (struct scatterlist *)data->buf;
-	start_addr = ib_sg_dma_address(ibdev, sgl);
 
+	cnt = 0;
 	for_each_sg(sgl, sg, data->dma_nents, i) {
-		if (start_check && !IS_4K_ALIGNED(start_addr))
-			break;
-
-		next_sg = sg_next(sg);
-		if (!next_sg)
-			break;
-
-		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
-		start_addr = ib_sg_dma_address(ibdev, next_sg);
-
-		if (end_addr == start_addr) {
-			start_check = 0;
-			continue;
-		} else
-			start_check = 1;
-
-		if (!IS_4K_ALIGNED(end_addr))
-			break;
+		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
+		   "offset: %ld sz: %ld\n", i,
+		   (unsigned long)sg_phys(sg),
+		   (unsigned long)sg->offset,
+		   (unsigned long)sg->length); */
+		end_addr = ib_sg_dma_address(ibdev, sg) +
+			   ib_sg_dma_len(ibdev, sg);
+		/* iser_dbg("Checking sg iobuf end address "
+		   "0x%08lX\n", end_addr); */
+		if (i + 1 < data->dma_nents) {
+			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
+			/* are i, i+1 fragments of the same page? */
+			if (end_addr == next_addr) {
+				cnt++;
+				continue;
+			} else if (!IS_4K_ALIGNED(end_addr)) {
+				ret_len = cnt + 1;
+				break;
+			}
+		}
+		cnt++;
 	}
-	ret_len = (next_sg) ? i : i+1;
+	if (i == data->dma_nents)
+		ret_len = cnt;	/* loop ended */
 	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
 		 ret_len, data->dma_nents, data);
 	return ret_len;
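Note: as a reading aid, here is a minimal userspace sketch of the page-vector logic this diff restores. It is not the kernel code: sg_ent and build_page_vec are hypothetical stand-ins for the scatterlist and iser_page_vec machinery above, with the DMA-mapping calls dropped.

/* Sketch of the restored iser_sg_to_page_vec() algorithm: merge SG
 * entries until a chunk ends 4K-aligned, then emit one address per
 * 4K page covered by the chunk. Compile with: cc -std=c99 sketch.c */
#include <stdio.h>
#include <stdint.h>

#define SIZE_4K 4096UL
#define MASK_4K (~(SIZE_4K - 1))	/* selects the page-frame bits */

struct sg_ent {				/* stand-in for one mapped SG element */
	uint64_t addr;			/* DMA address */
	unsigned int len;		/* length in bytes */
};

static int build_page_vec(const struct sg_ent *sg, int nents,
			  uint64_t *pages, int max_pages)
{
	int cur_page = 0;

	for (int i = 0; i < nents; i++) {
		uint64_t first = sg[i].addr;
		uint64_t last = first + sg[i].len;

		/* collect page fragments till the chunk ends 4K-aligned
		 * or the SG list ends, as the restored while loop does */
		while ((last & ~MASK_4K) && i + 1 < nents) {
			i++;
			last = sg[i].addr + sg[i].len;
		}

		/* the first page of a chunk may start unaligned; mask it
		 * down to its page boundary before emitting addresses */
		for (uint64_t page = first & MASK_4K;
		     page < last && cur_page < max_pages; page += SIZE_4K)
			pages[cur_page++] = page;
	}
	return cur_page;
}

int main(void)
{
	/* two fragments of the same 4K page, then one aligned page */
	const struct sg_ent sg[] = {
		{ 0x10000200, 0x0200 },	/* starts mid-page */
		{ 0x10000400, 0x0c00 },	/* same page, ends aligned */
		{ 0x10001000, 0x1000 },	/* one full aligned page */
	};
	uint64_t pages[8];
	int n = build_page_vec(sg, 3, pages, 8);

	for (int i = 0; i < n; i++)
		printf("page[%d] = 0x%llx\n", i, (unsigned long long)pages[i]);
	return 0;
}

Run, this prints page[0] = 0x10000000 and page[1] = 0x10001000: three SG entries compact into two page addresses, which is the "compaction" the kernel-doc comment above refers to.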
5 changes: 5 additions & 0 deletions trunk/include/rdma/ib_verbs.h
@@ -1425,6 +1425,11 @@ int ib_destroy_qp(struct ib_qp *qp);
  * @send_wr: A list of work requests to post on the send queue.
  * @bad_send_wr: On an immediate failure, this parameter will reference
  *   the work request that failed to be posted on the QP.
+ *
+ * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
+ * error is returned, the QP state shall not be affected,
+ * ib_post_send() will return an immediate error after queueing any
+ * earlier work requests in the list.
  */
 static inline int ib_post_send(struct ib_qp *qp,
 			       struct ib_send_wr *send_wr,
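The comment added here documents a real pitfall: even though the IBA says an immediate error must leave the QP unaffected, ib_post_send() may already have queued the work requests ahead of the failing one. Below is a hedged, kernel-style sketch of a caller that accounts for this; post_send_chain is a hypothetical helper, while ib_post_send() and struct ib_send_wr are the verbs API shown above.

/* Hypothetical caller: post a chain of WRs and use bad_send_wr to
 * learn how much of the chain was actually queued on failure. */
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static int post_send_chain(struct ib_qp *qp, struct ib_send_wr *chain)
{
	struct ib_send_wr *bad_wr = NULL;
	struct ib_send_wr *wr;
	int ret;

	ret = ib_post_send(qp, chain, &bad_wr);
	if (ret) {
		/* WRs before bad_wr were queued and will still generate
		 * completions; bad_wr and its successors were not posted,
		 * so only they may be retried or failed by the caller. */
		for (wr = chain; wr && wr != bad_wr; wr = wr->next)
			pr_debug("wr_id %llu queued despite error %d\n",
				 (unsigned long long)wr->wr_id, ret);
	}
	return ret;
}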
