Skip to content

Commit

Permalink
bcache: Use standard utility code
Browse files Browse the repository at this point in the history
Some of bcache's utility code has made it into the rest of the kernel,
so drop the bcache versions.

Bcache used to have a workaround for allocating from a bio set under
generic_make_request() (if you allocated more than once, the bios you
already allocated would get stuck on current->bio_list when you
submitted, and you'd risk deadlock) - bcache would mask out __GFP_WAIT
when allocating bios under generic_make_request() so that allocation
could fail and it could retry from workqueue. But bio_alloc_bioset() has
a workaround now, so we can drop this hack and the associated error
handling.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
  • Loading branch information
Kent Overstreet authored and Kent Overstreet committed Jul 1, 2013
1 parent 47cd2eb commit 8e51e41
Show file tree
Hide file tree
Showing 8 changed files with 51 additions and 144 deletions.
7 changes: 2 additions & 5 deletions drivers/md/bcache/btree.c
Original file line number Diff line number Diff line change
Expand Up @@ -350,7 +350,7 @@ static void do_btree_node_write(struct btree *b)
bkey_copy(&k.key, &b->key);
SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));

if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) {
if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
Expand Down Expand Up @@ -1865,7 +1865,7 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
should_split(b))
goto out;

op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio));
op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));

SET_KEY_PTRS(&op->replace, 1);
get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
Expand Down Expand Up @@ -2194,9 +2194,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
KEY_OFFSET(k) - bio->bi_sector);

n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
if (!n)
return -EAGAIN;

if (n == bio)
op->lookup_done = true;

Expand Down
2 changes: 1 addition & 1 deletion drivers/md/bcache/debug.c
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ void bch_data_verify(struct search *s)
if (!check)
return;

if (bch_bio_alloc_pages(check, GFP_NOIO))
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;

check->bi_rw = READ_SYNC;
Expand Down
64 changes: 22 additions & 42 deletions drivers/md/bcache/io.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,13 +68,6 @@ static void bch_generic_make_request_hack(struct bio *bio)
* The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
* bvec boundary; it is the caller's responsibility to ensure that @bio is not
* freed before the split.
*
* If bch_bio_split() is running under generic_make_request(), it's not safe to
* allocate more than one bio from the same bio set. Therefore, if it is running
* under generic_make_request() it masks out __GFP_WAIT when doing the
* allocation. The caller must check for failure if there's any possibility of
* it being called from under generic_make_request(); it is then the caller's
* responsibility to retry from a safe context (by e.g. punting to workqueue).
*/
struct bio *bch_bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs)
Expand All @@ -85,15 +78,6 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,

BUG_ON(sectors <= 0);

/*
* If we're being called from underneath generic_make_request() and we
* already allocated any bios from this bio set, we risk deadlock if we
* use the mempool. So instead, we possibly fail and let the caller punt
* to workqueue or somesuch and retry in a safe context.
*/
if (current->bio_list)
gfp &= ~__GFP_WAIT;

if (sectors >= bio_sectors(bio))
return bio;

Expand Down Expand Up @@ -164,28 +148,33 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
queue_max_segments(q));
struct bio_vec *bv, *end = bio_iovec(bio) +
min_t(int, bio_segments(bio), max_segments);

if (bio->bi_rw & REQ_DISCARD)
return min(ret, q->limits.max_discard_sectors);

if (bio_segments(bio) > max_segments ||
q->merge_bvec_fn) {
struct bio_vec *bv;
int i, seg = 0;

ret = 0;

for (bv = bio_iovec(bio); bv < end; bv++) {
bio_for_each_segment(bv, bio, i) {
struct bvec_merge_data bvm = {
.bi_bdev = bio->bi_bdev,
.bi_sector = bio->bi_sector,
.bi_size = ret << 9,
.bi_rw = bio->bi_rw,
};

if (seg == max_segments)
break;

if (q->merge_bvec_fn &&
q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
break;

seg++;
ret += bv->bv_len >> 9;
}
}
Expand Down Expand Up @@ -222,30 +211,10 @@ static void bch_bio_submit_split_endio(struct bio *bio, int error)
closure_put(cl);
}

static void __bch_bio_submit_split(struct closure *cl)
{
struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
struct bio *bio = s->bio, *n;

do {
n = bch_bio_split(bio, bch_bio_max_sectors(bio),
GFP_NOIO, s->p->bio_split);
if (!n)
continue_at(cl, __bch_bio_submit_split, system_wq);

n->bi_end_io = bch_bio_submit_split_endio;
n->bi_private = cl;

closure_get(cl);
bch_generic_make_request_hack(n);
} while (n != bio);

continue_at(cl, bch_bio_submit_split_done, NULL);
}

void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
struct bio_split_hook *s;
struct bio *n;

if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
goto submit;
Expand All @@ -254,15 +223,26 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
goto submit;

s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
closure_init(&s->cl, NULL);

s->bio = bio;
s->p = p;
s->bi_end_io = bio->bi_end_io;
s->bi_private = bio->bi_private;
bio_get(bio);

closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
return;
do {
n = bch_bio_split(bio, bch_bio_max_sectors(bio),
GFP_NOIO, s->p->bio_split);

n->bi_end_io = bch_bio_submit_split_endio;
n->bi_private = &s->cl;

closure_get(&s->cl);
bch_generic_make_request_hack(n);
} while (n != bio);

continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
bch_generic_make_request_hack(bio);
}
Expand Down
7 changes: 4 additions & 3 deletions drivers/md/bcache/movinggc.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,10 @@ static void write_moving_finish(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, s.cl);
struct bio *bio = &io->bio.bio;
struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt);
struct bio_vec *bv;
int i;

while (bv-- != bio->bi_io_vec)
bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page);

if (io->s.op.insert_collision)
Expand Down Expand Up @@ -158,7 +159,7 @@ static void read_moving(struct closure *cl)
bio->bi_rw = READ;
bio->bi_end_io = read_moving_endio;

if (bch_bio_alloc_pages(bio, GFP_KERNEL))
if (bio_alloc_pages(bio, GFP_KERNEL))
goto err;

trace_bcache_gc_copy(&w->key);
Expand Down
87 changes: 18 additions & 69 deletions drivers/md/bcache/request.c
Original file line number Diff line number Diff line change
Expand Up @@ -509,10 +509,6 @@ static void bch_insert_data_loop(struct closure *cl)
goto err;

n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
if (!n) {
__bkey_put(op->c, k);
continue_at(cl, bch_insert_data_loop, bcache_wq);
}

n->bi_end_io = bch_insert_data_endio;
n->bi_private = cl;
Expand Down Expand Up @@ -821,53 +817,13 @@ static void request_read_done(struct closure *cl)
*/

if (s->op.cache_bio) {
struct bio_vec *src, *dst;
unsigned src_offset, dst_offset, bytes;
void *dst_ptr;

bio_reset(s->op.cache_bio);
s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
bch_bio_map(s->op.cache_bio, NULL);

src = bio_iovec(s->op.cache_bio);
dst = bio_iovec(s->cache_miss);
src_offset = src->bv_offset;
dst_offset = dst->bv_offset;
dst_ptr = kmap(dst->bv_page);

while (1) {
if (dst_offset == dst->bv_offset + dst->bv_len) {
kunmap(dst->bv_page);
dst++;
if (dst == bio_iovec_idx(s->cache_miss,
s->cache_miss->bi_vcnt))
break;

dst_offset = dst->bv_offset;
dst_ptr = kmap(dst->bv_page);
}

if (src_offset == src->bv_offset + src->bv_len) {
src++;
if (src == bio_iovec_idx(s->op.cache_bio,
s->op.cache_bio->bi_vcnt))
BUG();

src_offset = src->bv_offset;
}

bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
src->bv_offset + src->bv_len - src_offset);

memcpy(dst_ptr + dst_offset,
page_address(src->bv_page) + src_offset,
bytes);

src_offset += bytes;
dst_offset += bytes;
}
bio_copy_data(s->cache_miss, s->op.cache_bio);

bio_put(s->cache_miss);
s->cache_miss = NULL;
Expand Down Expand Up @@ -912,9 +868,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *miss;

miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
if (!miss)
return -EAGAIN;

if (miss == bio)
s->op.lookup_done = true;

Expand All @@ -933,8 +886,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
reada = min(dc->readahead >> 9,
sectors - bio_sectors(miss));

if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
reada = bdev_sectors(miss->bi_bdev) -
bio_end_sector(miss);
}

s->cache_bio_sectors = bio_sectors(miss) + reada;
Expand All @@ -958,7 +912,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_put;

bch_bio_map(s->op.cache_bio, NULL);
if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;

s->cache_miss = miss;
Expand Down Expand Up @@ -1002,7 +956,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
struct bio *bio = &s->bio.bio;
struct bkey start, end;
start = KEY(dc->disk.id, bio->bi_sector, 0);
end = KEY(dc->disk.id, bio_end(bio), 0);
end = KEY(dc->disk.id, bio_end_sector(bio), 0);

bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);

Expand Down Expand Up @@ -1176,7 +1130,7 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
if (i->sequential + bio->bi_size > i->sequential)
i->sequential += bio->bi_size;

i->last = bio_end(bio);
i->last = bio_end_sector(bio);
i->jiffies = jiffies + msecs_to_jiffies(5000);
s->task->sequential_io = i->sequential;

Expand Down Expand Up @@ -1294,30 +1248,25 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
struct bio_vec *bv;
int i;

/* Zero fill bio */

while (bio->bi_idx != bio->bi_vcnt) {
struct bio_vec *bv = bio_iovec(bio);
bio_for_each_segment(bv, bio, i) {
unsigned j = min(bv->bv_len >> 9, sectors);

void *p = kmap(bv->bv_page);
memset(p + bv->bv_offset, 0, j << 9);
kunmap(bv->bv_page);

bv->bv_len -= j << 9;
bv->bv_offset += j << 9;

if (bv->bv_len)
return 0;

bio->bi_sector += j;
bio->bi_size -= j << 9;

bio->bi_idx++;
sectors -= j;
sectors -= j;
}

s->op.lookup_done = true;
bio_advance(bio, min(sectors << 9, bio->bi_size));

if (!bio->bi_size)
s->op.lookup_done = true;

return 0;
}
Expand All @@ -1344,8 +1293,8 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
closure_call(&s->op.cl, btree_read_async, NULL, cl);
} else if (bio_has_data(bio) || s->op.skip) {
bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
&KEY(d->id, bio->bi_sector, 0),
&KEY(d->id, bio_end(bio), 0));
&KEY(d->id, bio->bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));

s->writeback = true;
s->op.cache_bio = bio;
Expand Down
17 changes: 0 additions & 17 deletions drivers/md/bcache/util.c
Original file line number Diff line number Diff line change
Expand Up @@ -228,23 +228,6 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
}
}

/*
 * Allocate one page (with allocation flags @gfp) for every bvec in @bio,
 * storing each in bv->bv_page.
 *
 * On failure, frees every page allocated so far — walking bv backwards to
 * the first segment (bio->bi_io_vec + bio->bi_idx) — and returns -ENOMEM,
 * leaving the partially-filled bv_page pointers stale. Returns 0 on success.
 */
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp)
{
int i;
struct bio_vec *bv;

bio_for_each_segment(bv, bio, i) {
bv->bv_page = alloc_page(gfp);
if (!bv->bv_page) {
/* Unwind: release pages from the segments already filled. */
while (bv-- != bio->bi_io_vec + bio->bi_idx)
__free_page(bv->bv_page);
return -ENOMEM;
}
}

return 0;
}

/*
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
* use permitted, subject to terms of PostgreSQL license; see.)
Expand Down
4 changes: 0 additions & 4 deletions drivers/md/bcache/util.h
Original file line number Diff line number Diff line change
Expand Up @@ -564,12 +564,8 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
return x;
}

#define bio_end(bio) ((bio)->bi_sector + bio_sectors(bio))

void bch_bio_map(struct bio *bio, void *base);

int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
return bdev->bd_inode->i_size >> 9;
Expand Down
Loading

0 comments on commit 8e51e41

Please sign in to comment.