target/user: Use iovec[] to describe continuous area
We don't need to use one iovec per scatter-gather list entry, since the
data area is contiguous.

Reviewed-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Sheng Yang <sheng@yasker.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Sheng Yang authored and Nicholas Bellinger committed Mar 11, 2016
1 parent 03a68b4 commit f1dbd08
Showing 1 changed file with 26 additions and 15 deletions.
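
The reasoning above is the whole patch in miniature: because the data area is one contiguous region, consecutive scatter-gather entries can extend the iovec that is already open instead of each claiming a new one. Below is a minimal userspace sketch of that coalescing pattern; struct seg, coalesce_iovs and the sizes in main are illustrative assumptions, not kernel code.

#include <stddef.h>
#include <stdio.h>
#include <sys/uio.h>

/*
 * Hypothetical segment descriptor: an offset and length inside one
 * contiguous data buffer. Not a kernel structure; illustration only.
 */
struct seg {
        size_t off;
        size_t len;
};

/*
 * Describe the segments with as few iovecs as possible: a segment that
 * starts exactly where the previous one ended just grows the current
 * iovec's iov_len; only a gap (or the first segment) opens a new iovec.
 */
static int coalesce_iovs(char *base, const struct seg *segs, int nsegs,
                         struct iovec *iov, int max_iov)
{
        int cnt = 0;

        for (int i = 0; i < nsegs; i++) {
                if (cnt > 0 &&
                    (char *)iov[cnt - 1].iov_base + iov[cnt - 1].iov_len ==
                    base + segs[i].off) {
                        iov[cnt - 1].iov_len += segs[i].len;
                        continue;
                }
                if (cnt == max_iov)
                        return -1;      /* would overflow the iovec array */
                iov[cnt].iov_base = base + segs[i].off;
                iov[cnt].iov_len = segs[i].len;
                cnt++;
        }
        return cnt;
}

int main(void)
{
        static char buf[4096];
        /* Three back-to-back segments collapse into a single iovec. */
        struct seg segs[] = { { 0, 512 }, { 512, 1024 }, { 1536, 256 } };
        struct iovec iov[3];

        int n = coalesce_iovs(buf, segs, 3, iov, 3);
        printf("%d iovec(s), first covers %zu bytes\n", n, iov[0].iov_len);
        return 0;
}

In the kernel change below the same idea appears as new_iov(), which opens an iovec at the current data_head, and the (*iov)->iov_len += copy_bytes line, which grows it; a fresh iovec is only needed when the ring wraps.
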
drivers/target/target_core_user.c: 41 changes (26 additions, 15 deletions)
@@ -231,6 +231,23 @@ static inline size_t head_to_end(size_t head, size_t size)
         return size - head;
 }
 
+static inline void new_iov(struct iovec **iov, int *iov_cnt,
+                           struct tcmu_dev *udev)
+{
+        struct iovec *iovec;
+
+        if (*iov_cnt != 0)
+                (*iov)++;
+        (*iov_cnt)++;
+
+        iovec = *iov;
+        memset(iovec, 0, sizeof(struct iovec));
+
+        /* Even iov_base is relative to mb_addr */
+        iovec->iov_base = (void __user *) udev->data_off +
+                udev->data_head;
+}
+
 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
 
 static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
@@ -242,6 +259,10 @@ static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
         size_t copy_bytes;
         struct scatterlist *sg;
 
+        if (data_nents == 0)
+                return;
+
+        new_iov(iov, iov_cnt, udev);
         for_each_sg(data_sg, sg, data_nents, i) {
                 copy_bytes = min_t(size_t, sg->length,
                                    head_to_end(udev->data_head, udev->data_size));
@@ -253,12 +274,7 @@ static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
                         tcmu_flush_dcache_range(to, copy_bytes);
                 }
 
-                /* Even iov_base is relative to mb_addr */
-                (*iov)->iov_len = copy_bytes;
-                (*iov)->iov_base = (void __user *) udev->data_off +
-                        udev->data_head;
-                (*iov_cnt)++;
-                (*iov)++;
+                (*iov)->iov_len += copy_bytes;
 
                 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
 
@@ -268,9 +284,8 @@ static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
 
                         copy_bytes = sg->length - copy_bytes;
 
+                        new_iov(iov, iov_cnt, udev);
                         (*iov)->iov_len = copy_bytes;
-                        (*iov)->iov_base = (void __user *) udev->data_off +
-                                        udev->data_head;
 
                         if (copy_data) {
                                 to = (void *) udev->mb_addr +
@@ -279,8 +294,6 @@ static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
                                 tcmu_flush_dcache_range(to, copy_bytes);
                         }
 
-                        (*iov_cnt)++;
-                        (*iov)++;
 
                         UPDATE_HEAD(udev->data_head,
                                 copy_bytes, udev->data_size);
@@ -393,12 +406,10 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
          * Must be a certain minimum size for response sense info, but
          * also may be larger if the iov array is large.
          *
-         * iovs = sgl_nents+1, for end-of-ring case, plus another 1
-         * b/c size == offsetof one-past-element.
+         * 3 iovs since we can describe the whole continuous are using one
+         * for data, one for bidi and one more in the case of wrap.
          */
-        base_command_size = max(offsetof(struct tcmu_cmd_entry,
-                                         req.iov[se_cmd->t_bidi_data_nents +
-                                                 se_cmd->t_data_nents + 2]),
+        base_command_size = max(offsetof(struct tcmu_cmd_entry, req.iov[3]),
                                 sizeof(struct tcmu_cmd_entry));
         command_size = base_command_size
                 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
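
The last hunk is the payoff of the coalescing: the worst-case entry size no longer scales with the scatter-gather list, it is bounded by three iovecs (per the new comment: one for data, one for bidi, one more for the wrap case). Below is a rough sketch of how the two bounds compare, using a simplified stand-in layout; fake_cmd_entry, its hdr field and the example nent counts are assumptions for illustration, not the real struct tcmu_cmd_entry from the UAPI header.

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

/*
 * Simplified stand-in for the ring entry: a fixed header followed by a
 * variable-length iovec array. Only the sizing behaviour is the point;
 * this is not the real TCMU command entry layout.
 */
struct fake_cmd_entry {
        unsigned long hdr[6];   /* placeholder for the fixed request header */
        struct iovec iov[];     /* variable-length tail */
};

int main(void)
{
        /* Example scatter-gather list sizes (illustrative). */
        enum { data_nents = 32, bidi_nents = 4 };

        /* Old bound: one iovec per SG entry, plus 2 extra. */
        size_t old_bound = offsetof(struct fake_cmd_entry,
                                    iov[data_nents + bidi_nents + 2]);

        /* New bound: a constant 3 iovecs (data, bidi, spare for the wrap). */
        size_t new_bound = offsetof(struct fake_cmd_entry, iov[3]);

        printf("old bound: %zu bytes, new bound: %zu bytes\n",
               old_bound, new_bound);
        return 0;
}

With the old formula, base_command_size grew with t_data_nents + t_bidi_data_nents; with req.iov[3] it is a small constant, so command_size is dominated by the rounded-up CDB length.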
