target/user: Recalculate pad size inside is_ring_space_avail()
If more than one thread is waiting for command ring space that includes
a PAD, and the first one finishes (inserting a PAD and a CMD at the
start of the cmd ring), the second one will incorrectly think it still
needs to insert a PAD, i.e. its cmdr_space_needed value is now stale. It
will then ask for more space than it actually needs, and insert a PAD
somewhere other than at the end of the ring -- not what we want.

This patch moves the pad calculation inside is_ring_space_avail(), so
that in the above scenario the second thread asks for space that does
not include a PAD. The patch also inserts the PAD op based upon an
up-to-date cmd_head, instead of a potentially stale value.

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Andy Grover authored and Nicholas Bellinger committed Oct 3, 2014
1 parent 6375f89 commit f56574a
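
For readers unfamiliar with the ring layout, the sketch below illustrates the end-of-ring accounting that this patch centralizes: when a command does not fit between the current head and the physical end of the ring, the leftover tail must first be filled with a PAD entry (commands are internally contiguous), so the space actually required is the command size plus that tail. This is a minimal, self-contained user-space sketch, not the kernel code; RING_SIZE, ring_space_needed() and the example values are hypothetical, while head_to_end() only mirrors the idea of the helper of the same name in target_core_user.c.

#include <stdio.h>
#include <stddef.h>

#define RING_SIZE 4096u   /* hypothetical command-ring size in bytes */

/* Bytes between 'head' and the physical end of a ring of 'size' bytes. */
static size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

/*
 * Space a contiguous command of cmd_size bytes really needs at 'head':
 * if it does not fit before the wrap point, a PAD entry must consume the
 * remaining tail first, so the requirement grows by head_to_end().
 */
static size_t ring_space_needed(size_t head, size_t cmd_size)
{
	if (head_to_end(head, RING_SIZE) >= cmd_size)
		return cmd_size;                         /* fits as-is   */
	return cmd_size + head_to_end(head, RING_SIZE);  /* PAD + cmd    */
}

int main(void)
{
	/* 96 bytes remain before the wrap, so a 128-byte command needs
	 * 96 (PAD) + 128 = 224 bytes of free ring space. */
	printf("%zu\n", ring_space_needed(4000, 128));   /* prints 224 */

	/* Plenty of room before the wrap: only the command size is needed. */
	printf("%zu\n", ring_space_needed(100, 128));    /* prints 128 */
	return 0;
}

Because the pad size is derived from the current head every time the space check runs, a waiter that wakes up after another thread has already consumed the end-of-ring gap no longer over-reserves space.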
Showing 1 changed file with 16 additions and 15 deletions.
drivers/target/target_core_user.c: 31 changes (16 additions, 15 deletions)
@@ -236,16 +236,26 @@ static inline size_t head_to_end(size_t head, size_t size)
  *
  * Called with ring lock held.
  */
-static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_needed, size_t data_needed)
+static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
 {
 	struct tcmu_mailbox *mb = udev->mb_addr;
 	size_t space;
 	u32 cmd_head;
+	size_t cmd_needed;
 
 	tcmu_flush_dcache_range(mb, sizeof(*mb));
 
 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
 
+	/*
+	 * If cmd end-of-ring space is too small then we need space for a NOP plus
+	 * original cmd - cmds are internally contiguous.
+	 */
+	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
+		cmd_needed = cmd_size;
+	else
+		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
+
 	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
 	if (space < cmd_needed) {
 		pr_debug("no cmd space: %u %u %u\n", cmd_head,
@@ -268,9 +278,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
 	size_t base_command_size, command_size;
-	size_t cmdr_space_needed;
 	struct tcmu_mailbox *mb;
-	size_t pad_size;
 	struct tcmu_cmd_entry *entry;
 	int i;
 	struct scatterlist *sg;
@@ -307,17 +315,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 		"cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
 		udev->cmdr_size, udev->data_size);
 
-	/*
-	 * Cmd end-of-ring space is too small so we need space for a NOP plus
-	 * original cmd - cmds are internally contiguous.
-	 */
-	if (head_to_end(cmd_head, udev->cmdr_size) >= command_size)
-		pad_size = 0;
-	else
-		pad_size = head_to_end(cmd_head, udev->cmdr_size);
-	cmdr_space_needed = command_size + pad_size;
-
-	while (!is_ring_space_avail(udev, cmdr_space_needed, tcmu_cmd->data_length)) {
+	while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
 		int ret;
 		DEFINE_WAIT(__wait);
 
@@ -338,7 +336,10 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
 	}
 
-	if (pad_size) {
+	/* Insert a PAD if end-of-ring space is too small */
+	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
+		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
+
 		entry = (void *) mb + CMDR_OFF + cmd_head;
 		tcmu_flush_dcache_range(entry, sizeof(*entry));
 		tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD);