block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead
commit e692cb6 upstream.

When stacking devices, a request_queue is not always available. This
forced us to have a no_cluster flag in the queue_limits that could be
used as a carrier until the request_queue had been set up for a
metadevice.

There were several problems with that approach. First of all, it was up
to the stacking device to remember to set the queue flag after stacking
had completed. Also, the queue flag and the queue limits had to be kept in
sync at all times. We got that wrong, which could lead to us issuing
commands that went beyond the max scatterlist limit set by the driver.

The proper fix is to avoid having two flags for tracking the same thing.
We deprecate QUEUE_FLAG_CLUSTER and use the queue limit directly in the
block layer merging functions. The queue_limit 'no_cluster' is turned
into 'cluster' to avoid double negatives and to ease stacking.
Clustering defaults to being enabled as before. The queue flag logic is
removed from the stacking function, and explicitly setting the cluster
flag is no longer necessary in DM and MD.

Reported-by: Ed Lin <ed.lin@promise.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Martin K. Petersen authored and Greg Kroah-Hartman committed Jan 7, 2011
1 parent 52cbcb0 commit dce52ea
Showing 6 changed files with 12 additions and 38 deletions.
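
The core of the change is that clustering now stacks arithmetically instead of via flag juggling: blk_set_default_limits() starts every queue at cluster = 1, and the new t->cluster &= b->cluster line in blk_stack_limits() (see the block/blk-settings.c hunks below) ANDs the bottom device's value into the top's. A minimal standalone model of those semantics — simplified structs and device names are illustrative, not the kernel code:

        /* Standalone model of the new stacking semantics -- not the kernel
         * code. 'struct queue_limits' is reduced to the one field under
         * discussion. */
        #include <stdio.h>

        struct queue_limits {
                unsigned char cluster;  /* 1 = adjacent segments may be merged */
        };

        /* The stacked device may cluster only if every device beneath it can. */
        static void stack_limits(struct queue_limits *t, const struct queue_limits *b)
        {
                t->cluster &= b->cluster;
        }

        int main(void)
        {
                struct queue_limits md  = { .cluster = 1 };  /* blk_set_default_limits() */
                struct queue_limits sda = { .cluster = 1 };
                struct queue_limits sdb = { .cluster = 0 };  /* host with clustering off */

                stack_limits(&md, &sda);
                printf("after sda: cluster=%d\n", md.cluster);  /* 1 */
                stack_limits(&md, &sdb);
                printf("after sdb: cluster=%d\n", md.cluster);  /* 0 */
                return 0;
        }

Compared with the old t->no_cluster |= b->no_cluster, the positive sense also reads naturally: the default of 1 survives only while every layer keeps it.
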
6 changes: 3 additions & 3 deletions block/blk-merge.c
@@ -22,7 +22,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 0;
 
 	fbio = bio;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	phys_size = nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (!blk_queue_cluster(q))
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -124,7 +124,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 
 	/*
 	 * for each bio in rq
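
All three merge paths above reduce to the same gate: if blk_queue_cluster(q) is false, every bio segment stays a separate scatterlist entry. A standalone sketch of that decision — structs are reduced to the fields used here, and the real blk_phys_contig_segment() additionally checks physical contiguity and the segment boundary mask:

        /* Standalone sketch of the merge gate -- not kernel code. */
        #include <stdbool.h>
        #include <stdio.h>

        struct queue_limits {
                unsigned char cluster;
                unsigned int max_segment_size;
        };

        struct request_queue {
                struct queue_limits limits;
        };

        /* Same shape as the new helper added in include/linux/blkdev.h below. */
        static unsigned int blk_queue_cluster(const struct request_queue *q)
        {
                return q->limits.cluster;
        }

        static bool can_merge_segments(const struct request_queue *q,
                                       unsigned int seg_a, unsigned int seg_b)
        {
                if (!blk_queue_cluster(q))      /* clustering off: never merge */
                        return false;
                return seg_a + seg_b <= q->limits.max_segment_size;
        }

        int main(void)
        {
                struct request_queue q = {
                        .limits = { .cluster = 1, .max_segment_size = 65536 },
                };

                printf("%d\n", can_merge_segments(&q, 4096, 4096));    /* 1 */
                q.limits.cluster = 0;
                printf("%d\n", can_merge_segments(&q, 4096, 4096));    /* 0 */
                return 0;
        }
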
24 changes: 2 additions & 22 deletions block/blk-settings.c
@@ -103,7 +103,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->no_cluster = 0;
+	lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -477,15 +477,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	blk_stack_limits(&t->limits, &b->limits, 0);
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(t->queue_lock, flags);
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -561,7 +552,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm(t->io_opt, b->io_opt);
 
-	t->no_cluster |= b->no_cluster;
+	t->cluster &= b->cluster;
 
 	/* Physical block size a multiple of the logical block size? */
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
@@ -652,17 +643,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
 		       top, bottom);
 	}
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(t->queue_lock, flags);
-		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
5 changes: 0 additions & 5 deletions drivers/md/dm-table.c
@@ -1082,11 +1082,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
-	if (limits->no_cluster)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
 	dm_table_set_integrity(t);
 
 	/*
3 changes: 0 additions & 3 deletions drivers/md/md.c
@@ -3959,9 +3959,6 @@ static int md_alloc(dev_t dev, char *name)
 		goto abort;
 	mddev->queue->queuedata = mddev;
 
-	/* Can be unlocked because the queue is new: no concurrency */
-	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
 	blk_queue_make_request(mddev->queue, md_make_request);
 
 	disk = alloc_disk(1 << shift);
3 changes: 1 addition & 2 deletions drivers/scsi/scsi_lib.c
@@ -1636,9 +1636,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
-	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+		q->limits.cluster = 0;
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
9 changes: 6 additions & 3 deletions include/linux/blkdev.h
@@ -318,7 +318,7 @@ struct queue_limits {
 	unsigned short		max_phys_segments;
 
 	unsigned char		misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 };
 
 struct request_queue
@@ -440,7 +440,6 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
@@ -461,7 +460,6 @@ struct request_queue
 #define QUEUE_FLAG_DISCARD	17	/* supports DISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP))
 
@@ -627,6 +625,11 @@ enum {
 
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
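
Viewed end to end, the hunks remove a classic duplicated-state bug: the queue flag and the limit described the same fact, stacking updated only the limit, and nothing forced the flag to follow — the out-of-sync condition the commit message describes. A standalone illustration of that failure mode, with hypothetical simplified structs:

        /* Standalone illustration of the old failure mode -- not kernel code. */
        #include <stdio.h>

        #define QUEUE_FLAG_CLUSTER 0            /* as in the old blkdev.h */

        struct old_queue {
                unsigned long queue_flags;
                unsigned char no_cluster;       /* duplicate of the flag, inverted */
        };

        int main(void)
        {
                struct old_queue bottom = { .queue_flags = 0, .no_cluster = 1 };
                struct old_queue top = {
                        .queue_flags = 1UL << QUEUE_FLAG_CLUSTER,  /* default: set */
                        .no_cluster = 0,
                };

                /* Stacking propagates the limit... */
                top.no_cluster |= bottom.no_cluster;

                /* ...but the merge code tested the flag, so it kept clustering
                 * segments beyond what the bottom driver advertised. */
                int flag = !!(top.queue_flags & (1UL << QUEUE_FLAG_CLUSTER));
                printf("limit: no_cluster=%d, flag: cluster=%d (out of sync)\n",
                       top.no_cluster, flag);
                return 0;
        }
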
