Commit

dm crypt: fix remove first_clone
Get rid of first_clone in dm-crypt

This gets rid of first_clone, which is not really needed.  Apparently, cloned
bios used to share their bvec at some point in the past; this is no longer
the case.  Worse, keeping first_clone actively hurts us if we try to create a
clone off it after it has completed, because crypt_endio has already destroyed
its bvec.
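
Background for the change: with first_clone gone, the only thing keeping the
parent io alive while its clones are in flight is the io->pending reference
count touched in the diff below.  What follows is a minimal userspace sketch of
that pattern, not kernel code: C11 atomics stand in for the kernel's atomic_t,
the names fake_io and issue_clone are hypothetical, and initializing the count
up front is a simplification of what process_write actually does.

/*
 * Sketch of per-io reference counting: every clone still in flight holds
 * one count on the parent io; the parent is completed and freed only when
 * the last count is dropped.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_io {
	atomic_int pending;	/* clones still in flight */
	int error;
};

static void dec_pending(struct fake_io *io, int error)
{
	if (error)
		io->error = error;

	/* Last reference dropped: complete the original request. */
	if (atomic_fetch_sub(&io->pending, 1) == 1) {
		printf("io complete, error=%d\n", io->error);
		free(io);
	}
}

/* Stand-in for submitting one clone bio to the lower device. */
static void issue_clone(struct fake_io *io, int idx)
{
	printf("clone %d submitted\n", idx);
	dec_pending(io, 0);	/* pretend it completed immediately */
}

int main(void)
{
	struct fake_io *io = malloc(sizeof(*io));
	int nr_clones = 3, i;

	io->error = 0;
	/* One count per clone we are about to issue. */
	atomic_init(&io->pending, nr_clones);

	for (i = 0; i < nr_clones; i++)
		issue_clone(io, i);

	return 0;
}

Each outstanding clone corresponds to one count on the parent io, and
dec_pending() completes the parent only when the last clone drops its count,
which is exactly why the extra first_clone reference is unnecessary.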

Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Olaf Kirch authored and Linus Torvalds committed May 9, 2007
1 parent 98221eb commit 2f9941b
Showing 1 changed file with 6 additions and 28 deletions.
34 changes: 6 additions & 28 deletions drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
 struct crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
-	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
@@ -380,22 +379,16 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *
-crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
-		   struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
+				      unsigned int *bio_vec_idx)
 {
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;
 
-	if (base_bio) {
-		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
-		__bio_clone(clone, base_bio);
-	} else
-		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;
 
@@ -498,9 +491,6 @@ static void dec_pending(struct crypt_io *io, int error)
	if (!atomic_dec_and_test(&io->pending))
		return;
 
-	if (io->first_clone)
-		bio_put(io->first_clone);
-
	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
 
	mempool_free(io, cc->io_pool);
@@ -618,8 +608,7 @@ static void process_write(struct crypt_io *io)
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
-		clone = crypt_alloc_buffer(io, base_bio->bi_size,
-					   io->first_clone, &bvec_idx);
+		clone = crypt_alloc_buffer(io, base_bio->bi_size, &bvec_idx);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
@@ -635,21 +624,11 @@ static void process_write(struct crypt_io *io)
		}
 
		clone->bi_sector = cc->start + sector;
-
-		if (!io->first_clone) {
-			/*
-			 * hold a reference to the first clone, because it
-			 * holds the bio_vec array and that can't be freed
-			 * before all other clones are released
-			 */
-			bio_get(clone);
-			io->first_clone = clone;
-		}
-
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);
 
-		/* prevent bio_put of first_clone */
+		/* Grab another reference to the io struct
+		 * before we kick off the request */
		if (remaining)
			atomic_inc(&io->pending);
 
@@ -965,7 +944,6 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
-	io->first_clone = NULL;
	io->error = io->post_process = 0;
	atomic_set(&io->pending, 0);
	kcryptd_queue_io(io);
