Commit 5ee21a5

virtio-blk: reorganize virtblk_add_req
Right now, both virtblk_add_req and virtblk_add_buf_wait call
virtqueue_add_buf.  To prepare for the next patches, abstract the call
to virtqueue_add_buf into a new function __virtblk_add_req, and include
the waiting logic directly in virtblk_add_req.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Asias He <asias@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Paolo Bonzini authored and Rusty Russell committed Mar 20, 2013
1 parent e538eba commit 5ee21a5
Showing 1 changed file with 20 additions and 35 deletions.
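
For orientation, here is the submission path as it reads after the patch, condensed from the added lines of the diff below (nothing in this sketch is new code; it is just the post-patch __virtblk_add_req and virtblk_add_req gathered in one place with the diff markers dropped):

static inline int __virtblk_add_req(struct virtqueue *vq,
				    struct virtblk_req *vbr,
				    unsigned long out,
				    unsigned long in)
{
	return virtqueue_add_buf(vq, vbr->sg, out, in, vbr, GFP_ATOMIC);
}

static void virtblk_add_req(struct virtblk_req *vbr,
			    unsigned int out, unsigned int in)
{
	struct virtio_blk *vblk = vbr->vblk;
	DEFINE_WAIT(wait);
	int ret;

	spin_lock_irq(vblk->disk->queue->queue_lock);
	while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr,
						 out, in)) < 0)) {
		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
					  TASK_UNINTERRUPTIBLE);

		spin_unlock_irq(vblk->disk->queue->queue_lock);
		io_schedule();
		spin_lock_irq(vblk->disk->queue->queue_lock);

		finish_wait(&vblk->queue_wait, &wait);
	}

	virtqueue_kick(vblk->vq);
	spin_unlock_irq(vblk->disk->queue->queue_lock);
}

A failed __virtblk_add_req means the ring is full, so the submitter sleeps on vblk->queue_wait with the queue lock dropped and retries once it is woken; the wake-up comes from the completion side, which is outside this diff.
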
drivers/block/virtio_blk.c (20 additions, 35 deletions)

@@ -100,50 +100,39 @@ static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
 	return vbr;
 }
 
-static void virtblk_add_buf_wait(struct virtio_blk *vblk,
-				 struct virtblk_req *vbr,
-				 unsigned long out,
-				 unsigned long in)
+static inline int __virtblk_add_req(struct virtqueue *vq,
+				    struct virtblk_req *vbr,
+				    unsigned long out,
+				    unsigned long in)
 {
+	return virtqueue_add_buf(vq, vbr->sg, out, in, vbr, GFP_ATOMIC);
+}
+
+static void virtblk_add_req(struct virtblk_req *vbr,
+			    unsigned int out, unsigned int in)
+{
+	struct virtio_blk *vblk = vbr->vblk;
 	DEFINE_WAIT(wait);
+	int ret;
 
-	for (;;) {
+	spin_lock_irq(vblk->disk->queue->queue_lock);
+	while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr,
+						 out, in)) < 0)) {
 		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
 					  TASK_UNINTERRUPTIBLE);
 
+		spin_unlock_irq(vblk->disk->queue->queue_lock);
+		io_schedule();
 		spin_lock_irq(vblk->disk->queue->queue_lock);
-		if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
-				      GFP_ATOMIC) < 0) {
-			spin_unlock_irq(vblk->disk->queue->queue_lock);
-			io_schedule();
-		} else {
-			virtqueue_kick(vblk->vq);
-			spin_unlock_irq(vblk->disk->queue->queue_lock);
-			break;
-		}
 
 		finish_wait(&vblk->queue_wait, &wait);
 	}
-	finish_wait(&vblk->queue_wait, &wait);
-}
-
-static inline void virtblk_add_req(struct virtblk_req *vbr,
-				   unsigned int out, unsigned int in)
-{
-	struct virtio_blk *vblk = vbr->vblk;
-
-	spin_lock_irq(vblk->disk->queue->queue_lock);
-	if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
-				       GFP_ATOMIC) < 0)) {
-		spin_unlock_irq(vblk->disk->queue->queue_lock);
-		virtblk_add_buf_wait(vblk, vbr, out, in);
-		return;
-	}
+
 	virtqueue_kick(vblk->vq);
 	spin_unlock_irq(vblk->disk->queue->queue_lock);
 }
 
-static int virtblk_bio_send_flush(struct virtblk_req *vbr)
+static void virtblk_bio_send_flush(struct virtblk_req *vbr)
 {
 	unsigned int out = 0, in = 0;
 
@@ -155,11 +144,9 @@ static int virtblk_bio_send_flush(struct virtblk_req *vbr)
 	sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
 
 	virtblk_add_req(vbr, out, in);
-
-	return 0;
 }
 
-static int virtblk_bio_send_data(struct virtblk_req *vbr)
+static void virtblk_bio_send_data(struct virtblk_req *vbr)
 {
 	struct virtio_blk *vblk = vbr->vblk;
 	unsigned int num, out = 0, in = 0;
@@ -188,8 +175,6 @@ static int virtblk_bio_send_data(struct virtblk_req *vbr)
 	}
 
 	virtblk_add_req(vbr, out, in);
-
-	return 0;
 }
 
 static void virtblk_bio_send_data_work(struct work_struct *work)
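
The wait-and-retry idiom that the patch moves into virtblk_add_req (try to add, and if the ring is full: register on the wait queue, drop the lock, sleep, relock, retry) is easier to see in a plain userspace analogue. The sketch below is illustrative only: it uses a POSIX mutex and condition variable in place of queue_lock and the vblk->queue_wait wait queue, and the ring type, its capacity, and every name in it are invented for the example.

/* Illustrative userspace analogue of the producer-side retry loop.
 * Build with: cc -pthread example.c */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 4

/* Toy stand-in for the virtqueue: a fixed-size ring protected by a lock. */
struct ring {
	int buf[RING_SIZE];
	int count;
	pthread_mutex_t lock;	/* plays the role of queue_lock */
	pthread_cond_t space;	/* plays the role of vblk->queue_wait */
};

static struct ring ring = {
	.lock  = PTHREAD_MUTEX_INITIALIZER,
	.space = PTHREAD_COND_INITIALIZER,
};

/* Shaped like the new virtblk_add_req(): keep retrying the add under the
 * lock, sleeping whenever the ring is full.  A consumer (not shown) would
 * signal 'space' after freeing a slot, the way the driver's completion
 * path wakes queue_wait. */
static void ring_add(int val)
{
	pthread_mutex_lock(&ring.lock);
	while (ring.count == RING_SIZE)			/* "the add failed"    */
		pthread_cond_wait(&ring.space, &ring.lock);	/* sleep, then retry */
	ring.buf[ring.count++] = val;			/* the add succeeded   */
	pthread_mutex_unlock(&ring.lock);		/* real code kicks the host here */
}

int main(void)
{
	ring_add(42);
	printf("entries queued: %d\n", ring.count);
	return 0;
}

The kernel version cannot use a condition variable, so it open-codes the same sleep with prepare_to_wait_exclusive(), io_schedule() and finish_wait(), dropping the spinlock around the sleep exactly as pthread_cond_wait drops the mutex.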
