Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 209157
b: refs/heads/master
c: 4a0b4dd
h: refs/heads/master
i:
  209155: 24f3ab0
v: v3
  • Loading branch information
Mike Snitzer authored and Alasdair G Kergon committed Aug 12, 2010
1 parent c43e87d commit da6c77f
Show file tree
Hide file tree
Showing 4 changed files with 80 additions and 27 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: a5664dad7e1a278d2915c2bf79cf42250e12d7db
refs/heads/master: 4a0b4ddf261fc89c050fe0a10ec57a61251d7ac0
11 changes: 10 additions & 1 deletion trunk/drivers/md/dm-ioctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -1189,7 +1189,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto out;
}

/* Protect md->type against concurrent table loads. */
/* Protect md->type and md->queue against concurrent table loads. */
dm_lock_md_type(md);
if (dm_get_md_type(md) == DM_TYPE_NONE)
/* Initial table load: acquire type of table. */
Expand All @@ -1201,6 +1201,15 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
r = -EINVAL;
goto out;
}

/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md);
if (r) {
DMWARN("unable to set up device queue for new table.");
dm_table_destroy(t);
dm_unlock_md_type(md);
goto out;
}
dm_unlock_md_type(md);

/* stage inactive table */
Expand Down
92 changes: 67 additions & 25 deletions trunk/drivers/md/dm.c
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ struct mapped_device {

struct request_queue *queue;
unsigned type;
/* Protect type against concurrent access. */
/* Protect queue and type against concurrent access. */
struct mutex type_lock;

struct gendisk *disk;
Expand Down Expand Up @@ -1856,6 +1856,28 @@ static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
static void dm_rq_barrier_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
{
/*
* Request-based dm devices cannot be stacked on top of bio-based dm
* devices. The type of this dm device has not been decided yet.
* The type is decided at the first table loading time.
* To prevent problematic device stacking, clear the queue flag
* for request stacking support until then.
*
* This queue is new, so no concurrency on the queue_flags.
*/
queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}

/*
* Allocate and initialise a blank device with a given minor.
*/
Expand Down Expand Up @@ -1895,33 +1917,11 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);

md->queue = blk_init_queue(dm_request_fn, NULL);
md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
goto bad_queue;

/*
* Request-based dm devices cannot be stacked on top of bio-based dm
* devices. The type of this dm device has not been decided yet,
* although we initialized the queue using blk_init_queue().
* The type is decided at the first table loading time.
* To prevent problematic device stacking, clear the queue flag
* for request stacking support until then.
*
* This queue is new, so no concurrency on the queue_flags.
*/
queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
md->saved_make_request_fn = md->queue->make_request_fn;
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
blk_queue_lld_busy(md->queue, dm_lld_busy);
blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
dm_init_md_queue(md);

md->disk = alloc_disk(1);
if (!md->disk)
Expand Down Expand Up @@ -2160,6 +2160,48 @@ unsigned dm_get_md_type(struct mapped_device *md)
return md->type;
}

/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 *
 * Returns 1 on success (or if the queue was already fully initialized
 * by a previous table load), 0 on failure.
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q;

	/* A non-NULL elevator means the queue is already request-based. */
	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	md->saved_make_request_fn = md->queue->make_request_fn;
	/* Redo common init: blk_init_allocated_queue reset make_request_fn. */
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);

	/*
	 * Previously ignored: a failure here leaves the elevator
	 * unregistered with sysfs, so propagate it as a setup failure.
	 * NOTE(review): the queue itself stays initialized; a retry will
	 * take the early-return path above — confirm that is acceptable.
	 */
	if (elv_register_queue(md->queue))
		return 0;

	return 1;
}

/*
 * Setup the DM device's queue based on md's type
 *
 * Returns 0 on success, -EINVAL if a request-based queue could not be
 * fully initialized.  Bio-based (and not-yet-typed) devices need no
 * extra queue setup here.
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	int r = 0;

	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		r = -EINVAL;
	}

	return r;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
struct mapped_device *md;
Expand Down
2 changes: 2 additions & 0 deletions trunk/drivers/md/dm.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,8 @@ void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md);

/*
* To check the return value from dm_table_find_target().
*/
Expand Down

0 comments on commit da6c77f

Please sign in to comment.