xen/blkback: Separate the bio allocation and the bio submission.
We separate the bio allocation (bio_alloc) from the bio submission so
that the error paths are much simpler and the bio submission can be
done in one tight loop. It also makes the plug/unplug calls much
easier.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Authored and committed by Konrad Rzeszutek Wilk on Apr 15, 2011
1 parent: 0faa8cc; commit: 7708992
Showing 1 changed file with 23 additions and 22 deletions: drivers/xen/blkback/blkback.c
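Before the diff itself, here is a minimal userspace C sketch of the two-phase pattern the commit message describes: allocate everything up front so there is a single error path, then submit in one tight loop. All names in it (fake_bio, fake_bio_alloc, fake_submit_bio, MAX_SEGS) are illustrative stand-ins, not kernel API; the real calls (bio_alloc, submit_bio, blk_start_plug/blk_finish_plug, the atomic_set on pendcnt) appear in the diff below.

#include <stdio.h>
#include <stdlib.h>

#define MAX_SEGS 11     /* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */

struct fake_bio { int seg; };

static struct fake_bio *fake_bio_alloc(int seg)
{
        struct fake_bio *b = malloc(sizeof(*b));
        if (b)
                b->seg = seg;
        return b;
}

static void fake_submit_bio(struct fake_bio *b)
{
        printf("submitting bio for segment %d\n", b->seg);
}

int main(void)
{
        struct fake_bio *biolist[MAX_SEGS];
        int i, nbio = 0;

        /* Phase 1: allocate every bio up front.  Nothing has been
         * submitted yet, so a failure unwinds with a single loop over
         * what was already allocated, mirroring fail_put_bio. */
        for (i = 0; i < MAX_SEGS; i++) {
                biolist[nbio] = fake_bio_alloc(i);
                if (biolist[nbio] == NULL)
                        goto fail_put_bio;
                nbio++;
        }

        /* Phase 2: submit in one tight loop.  In the real patch this
         * is where pendcnt is set once (atomic_set, no per-bio
         * atomic_inc) and the loop is bracketed by blk_start_plug and
         * blk_finish_plug. */
        for (i = 0; i < nbio; i++)
                fake_submit_bio(biolist[i]);

        for (i = 0; i < nbio; i++)
                free(biolist[i]);
        return 0;

fail_put_bio:
        for (i = 0; i < nbio; i++)
                free(biolist[i]);
        return 1;
}

Note one detail of the real patch that the sketch simplifies: bio = biolist[nbio++] = bio_alloc(...) increments nbio before the NULL check, so on failure biolist[nbio-1] is NULL and the fail_put_bio path puts only the first nbio-1 entries. The sketch increments nbio only on success, so its failure path frees all nbio entries.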
@@ -421,7 +421,8 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	unsigned int nseg;
 	struct bio *bio = NULL;
-	int ret, i;
+	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+	int ret, i, nbio = 0;
 	int operation;
 	struct blk_plug plug;
 	struct request_queue *q;
@@ -529,14 +530,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 		goto fail_flush;
 	}
 
-	/* Get a reference count for the disk queue and start sending I/O */
-	blk_get_queue(q);
-	blk_start_plug(&plug);
-
-	/* We set it one so that the last submit_bio does not have to call
-	 * atomic_inc.
-	 */
-	atomic_set(&pending_req->pendcnt, 1);
+	/* This corresponding blkif_put is done in __end_block_io_op */
 	blkif_get(blkif);
 
 	for (i = 0; i < nseg; i++) {
@@ -552,12 +546,8 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 				     blkbk->pending_page(pending_req, i),
 				     seg[i].nsec << 9,
 				     seg[i].buf & ~PAGE_MASK) == 0)) {
-			if (bio) {
-				atomic_inc(&pending_req->pendcnt);
-				submit_bio(operation, bio);
-			}
 
-			bio = bio_alloc(GFP_KERNEL, nseg-i);
+			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
 			if (unlikely(bio == NULL))
 				goto fail_put_bio;
 
@@ -573,7 +563,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	/* This will be hit if the operation was a barrier. */
 	if (!bio) {
 		BUG_ON(operation != WRITE_BARRIER);
-		bio = bio_alloc(GFP_KERNEL, 0);
+		bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
 		if (unlikely(bio == NULL))
 			goto fail_put_bio;
 
@@ -583,15 +573,28 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 		bio->bi_sector = -1;
 	}
 
-	submit_bio(operation, bio);
+
+	/* We set it one so that the last submit_bio does not have to call
+	 * atomic_inc.
+	 */
+	atomic_set(&pending_req->pendcnt, nbio);
+
+	/* Get a reference count for the disk queue and start sending I/O */
+	blk_get_queue(q);
+	blk_start_plug(&plug);
+
+	for (i = 0; i < nbio; i++)
+		submit_bio(operation, biolist[i]);
+
+	blk_finish_plug(&plug);
+	/* Let the I/Os go.. */
+	blk_put_queue(q);
 
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
 	else if (operation == WRITE || operation == WRITE_BARRIER)
 		blkif->st_wr_sect += preq.nr_sects;
 
-	blk_finish_plug(&plug);
-	blk_put_queue(q);
 	return;
 
  fail_flush:
@@ -604,11 +607,9 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	return;
 
  fail_put_bio:
+	for (i = 0; i < (nbio-1); i++)
+		bio_put(biolist[i]);
 	__end_block_io_op(pending_req, -EINVAL);
-	if (bio)
-		bio_put(bio);
-	blk_finish_plug(&plug);
-	blk_put_queue(q);
 	msleep(1); /* back off a bit */
 	return;
 }
