bounce: Refactor __blk_queue_bounce to not use bi_io_vec
A bunch of what __blk_queue_bounce() was doing was problematic for the
immutable bvec work; this cleans that up and the code is quite a bit
smaller, too.

The __bio_for_each_segment() in copy_to_high_bio_irq() was changed
because that one's looping over the original bio, not the bounce bio -
a later patch renames __bio_for_each_segment() ->
bio_for_each_segment_all(), and documents that
bio_for_each_segment_all() is only for code that owns the bio.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
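
As background for the iterator note above, a minimal sketch (not part of this commit) of the two styles of iteration using the 3.9-era macro signatures, with a plain integer index and no bvec_iter: bio_for_each_segment() starts at bi_idx and so only covers the segments the bio has not yet completed, whereas the bio_for_each_segment_all() introduced later in the series walks every bvec from index 0 and is therefore only safe on a bio its caller allocated.

/*
 * Sketch only, not from this commit: the two iterators as they look with
 * the 3.9-era signatures (plain integer index, no bvec_iter yet).
 */
#include <linux/bio.h>
#include <linux/highmem.h>

/* A bio we allocated ourselves: every bvec is ours, so the _all variant
 * (which ignores bi_idx) is appropriate. */
static void zero_owned_bio(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		zero_user(bv->bv_page, bv->bv_offset, bv->bv_len);
}

/* A bio owned by someone else: only the not-yet-completed segments from
 * bi_idx onward may be inspected. */
static unsigned bytes_remaining(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned bytes = 0;
	int i;

	bio_for_each_segment(bv, bio, i)
		bytes += bv->bv_len;

	return bytes;
}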
Kent Overstreet committed Mar 23, 2013
1 parent d3b45c2 commit 6bc454d
Showing 1 changed file with 19 additions and 54 deletions.

--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -101,7 +101,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 	struct bio_vec *tovec, *fromvec;
 	int i;
 
-	__bio_for_each_segment(tovec, to, i, 0) {
+	bio_for_each_segment(tovec, to, i) {
 		fromvec = from->bi_io_vec + i;
 
 		/*
@@ -218,78 +218,43 @@ static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool, int force)
 {
-	struct page *page;
-	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig);
+	struct bio *bio;
+	int rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, *from;
+	unsigned i;
 
-	bio_for_each_segment(from, *bio_orig, i) {
-		page = from->bv_page;
+	bio_for_each_segment(from, *bio_orig, i)
+		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+			goto bounce;
 
-		/*
-		 * is destination page below bounce pfn?
-		 */
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
-			continue;
+	return;
+bounce:
+	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
 
-		/*
-		 * irk, bounce it
-		 */
-		if (!bio) {
-			unsigned int cnt = (*bio_orig)->bi_vcnt;
-
-			bio = bio_alloc(GFP_NOIO, cnt);
-			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
-		}
 
+	bio_for_each_segment(to, bio, i) {
+		struct page *page = to->bv_page;
 
-		to = bio->bi_io_vec + i;
+		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+			continue;
 
-		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
-		to->bv_len = from->bv_len;
-		to->bv_offset = from->bv_offset;
 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
 
-			flush_dcache_page(from->bv_page);
+			flush_dcache_page(page);
+
 			vto = page_address(to->bv_page) + to->bv_offset;
-			vfrom = kmap(from->bv_page) + from->bv_offset;
+			vfrom = kmap_atomic(page) + to->bv_offset;
 			memcpy(vto, vfrom, to->bv_len);
-			kunmap(from->bv_page);
+			kunmap_atomic(vfrom);
 		}
 	}
 
-	/*
-	 * no pages bounced
-	 */
-	if (!bio)
-		return;
-
 	trace_block_bio_bounce(q, *bio_orig);
 
-	/*
-	 * at least one page was bounced, fill in possible non-highmem
-	 * pages
-	 */
-	__bio_for_each_segment(from, *bio_orig, i, 0) {
-		to = bio_iovec_idx(bio, i);
-		if (!to->bv_page) {
-			to->bv_page = from->bv_page;
-			to->bv_len = from->bv_len;
-			to->bv_offset = from->bv_offset;
-		}
-	}
-
-	bio->bi_bdev = (*bio_orig)->bi_bdev;
 	bio->bi_flags |= (1 << BIO_BOUNCED);
-	bio->bi_sector = (*bio_orig)->bi_sector;
-	bio->bi_rw = (*bio_orig)->bi_rw;
-
-	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
-	bio->bi_idx = (*bio_orig)->bi_idx;
-	bio->bi_size = (*bio_orig)->bi_size;
 
 	if (pool == page_pool) {
 		bio->bi_end_io = bounce_end_io_write;
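Why a straight clone is sufficient here, at this point in the series: bio_clone_bioset() copies the original's bi_io_vec into the clone, so the loop above can point individual bv_page entries at bounce pages while *bio_orig keeps its own vector untouched. A minimal sketch of that pattern (not taken from the tree; clone_and_redirect(), idx and replacement are made-up names for illustration):

/*
 * Sketch only, not from this commit: clone a bio and redirect one segment,
 * relying on the fact that (pre-immutable-bvec) bio_clone_bioset() gives
 * the clone its own copy of the bvec array.
 */
#include <linux/bio.h>

static struct bio *clone_and_redirect(struct bio *orig, unsigned idx,
				      struct page *replacement)
{
	struct bio *clone = bio_clone_bioset(orig, GFP_NOIO, fs_bio_set);

	if (!clone)
		return NULL;

	/* Only the clone's private bvec copy is modified; 'orig' is untouched. */
	clone->bi_io_vec[idx].bv_page = replacement;

	return clone;
}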
