dm: handle requests beyond end of device instead of using BUG_ON
The BUG_ON for access beyond the end of the device, introduced to
dm_request_fn via commit 29e4013 ("dm: implement REQ_FLUSH/FUA support
for request-based dm"), was an overly drastic (but simple) response to
such requests.

I have received a report that this BUG_ON was hit; it is better to use
dm_kill_unmapped_request() to fail both the clone and the original
request with -EIO.
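
Condensed from the dm_request_fn() hunk in the diff below, the
out-of-range case now performs the minimal setup that completion
relies on and then fails the request instead of asserting:

    ti = dm_table_find_target(map, pos);
    if (!dm_target_is_valid(ti)) {
            DMERR_LIMIT("request attempted access beyond the end of device");
            clone = dm_start_request(md, rq);
            dm_kill_unmapped_request(clone, -EIO);
            continue;
    }

dm_start_request() is the small helper the patch factors out of
dm_request_fn() so the same setup can run before the request is killed.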

map_request() will assign the valid target returned by
dm_table_find_target() to tio->ti.  But when the target isn't valid,
tio->ti is never assigned (because map_request() isn't called); so add
a check for tio->ti != NULL to dm_done().
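
In condensed form, the corresponding guard added to dm_done() (shown in
full in the first hunk below) skips the target's end_io hook when no
target was ever assigned:

    dm_request_endio_fn rq_end_io = NULL;

    if (tio->ti) {
            rq_end_io = tio->ti->type->rq_end_io;

            if (mapped && rq_end_io)
                    r = rq_end_io(tio->ti, clone, error, &tio->info);
    }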

Reported-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Cc: stable@vger.kernel.org # v2.6.37+
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Mike Snitzer authored and Alasdair G Kergon committed Sep 26, 2012
1 parent 7ba10aa commit ba1cbad
Showing 1 changed file with 38 additions and 18 deletions: drivers/md/dm.c
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 {
         int r = error;
         struct dm_rq_target_io *tio = clone->end_io_data;
-        dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+        dm_request_endio_fn rq_end_io = NULL;
 
-        if (mapped && rq_end_io)
-                r = rq_end_io(tio->ti, clone, error, &tio->info);
+        if (tio->ti) {
+                rq_end_io = tio->ti->type->rq_end_io;
+
+                if (mapped && rq_end_io)
+                        r = rq_end_io(tio->ti, clone, error, &tio->info);
+        }
 
         if (r <= 0)
                 /* The target wants to complete the I/O */
@@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
         int r, requeued = 0;
         struct dm_rq_target_io *tio = clone->end_io_data;
 
-        /*
-         * Hold the md reference here for the in-flight I/O.
-         * We can't rely on the reference count by device opener,
-         * because the device may be closed during the request completion
-         * when all bios are completed.
-         * See the comment in rq_completed() too.
-         */
-        dm_get(md);
-
         tio->ti = ti;
         r = ti->type->map_rq(ti, clone, &tio->info);
         switch (r) {
@@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
         return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+        struct request *clone;
+
+        blk_start_request(orig);
+        clone = orig->special;
+        atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+        /*
+         * Hold the md reference here for the in-flight I/O.
+         * We can't rely on the reference count by device opener,
+         * because the device may be closed during the request completion
+         * when all bios are completed.
+         * See the comment in rq_completed() too.
+         */
+        dm_get(md);
+
+        return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
                 pos = blk_rq_pos(rq);
 
                 ti = dm_table_find_target(map, pos);
-                BUG_ON(!dm_target_is_valid(ti));
+                if (!dm_target_is_valid(ti)) {
+                        /*
+                         * Must perform setup, that dm_done() requires,
+                         * before calling dm_kill_unmapped_request
+                         */
+                        DMERR_LIMIT("request attempted access beyond the end of device");
+                        clone = dm_start_request(md, rq);
+                        dm_kill_unmapped_request(clone, -EIO);
+                        continue;
+                }
 
                 if (ti->type->busy && ti->type->busy(ti))
                         goto delay_and_out;
 
-                blk_start_request(rq);
-                clone = rq->special;
-                atomic_inc(&md->pending[rq_data_dir(clone)]);
+                clone = dm_start_request(md, rq);
 
                 spin_unlock(q->queue_lock);
                 if (map_request(ti, clone, md))
@@ -1684,8 +1706,6 @@
                 blk_delay_queue(q, HZ / 10);
 out:
         dm_table_put(map);
-
-        return;
 }
 
 int dm_underlying_device_busy(struct request_queue *q)
