Commit

---
r: 334234
b: refs/heads/master
c: c83057a
h: refs/heads/master
v: v3
Shaohua Li authored and NeilBrown committed Oct 11, 2012
1 parent 7b64ecc commit ef41013
Showing 2 changed files with 19 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f1cad2b68ed12c0f82d3f56e150691f62b6f5edf
+refs/heads/master: c83057a1f4f987327c49448b046d9625c612ed8e
19 changes: 18 additions & 1 deletion trunk/drivers/md/raid0.c
@@ -88,6 +88,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	char b[BDEVNAME_SIZE];
 	char b2[BDEVNAME_SIZE];
 	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+	bool discard_supported = false;
 
 	if (!conf)
 		return -ENOMEM;
@@ -195,6 +196,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		if (!smallest || (rdev1->sectors < smallest->sectors))
 			smallest = rdev1;
 		cnt++;
+
+		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
+			discard_supported = true;
 	}
 	if (cnt != mddev->raid_disks) {
 		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
@@ -272,6 +276,11 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	blk_queue_io_opt(mddev->queue,
 			 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+	if (!discard_supported)
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+
 	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
 	*private_conf = conf;
 
@@ -423,6 +432,7 @@ static int raid0_run(struct mddev *mddev)
 		return -EINVAL;
 	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
 	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
@@ -510,7 +520,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 		sector_t sector = bio->bi_sector;
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if (bio->bi_vcnt != 1 ||
+		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
 		    bio->bi_idx != 0)
 			goto bad_map;
 		/* This is a one page bio that upper layers
@@ -536,6 +546,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 	bio->bi_sector = sector_offset + zone->dev_start +
 		tmp_dev->data_offset;
 
+	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+		/* Just ignore it */
+		bio_endio(bio, 0);
+		return;
+	}
+
 	generic_make_request(bio);
 	return;

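Taken together, the hunks above teach raid0 a simple discard policy: while building the stripe zones it records whether any member device's queue advertises discard, sets QUEUE_FLAG_DISCARD on the array's queue accordingly, caps a single discard request at one chunk, relaxes the one-page sanity check so page-less discard bios (bi_vcnt == 0) pass through, and quietly completes any discard that lands on a member that cannot discard. The user-space sketch below models only that decision logic; struct member, struct md_array, setup_queue_flags, and route_discard are invented names for illustration, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical user-space model of the policy above: the array
 * advertises discard if at least one member does, and a discard
 * routed to a member that cannot discard is completed as a no-op
 * instead of being forwarded.
 */
struct member {
	const char *name;
	bool supports_discard;	/* stands in for blk_queue_discard() */
};

struct md_array {
	struct member *members;
	int nmembers;
	bool discard_supported;	/* stands in for QUEUE_FLAG_DISCARD */
};

static void setup_queue_flags(struct md_array *a)
{
	bool discard_supported = false;
	int i;

	/* mirrors the per-rdev check in create_strip_zones() */
	for (i = 0; i < a->nmembers; i++)
		if (a->members[i].supports_discard)
			discard_supported = true;

	a->discard_supported = discard_supported;
}

/* Returns true if the discard is forwarded, false if quietly completed. */
static bool route_discard(struct md_array *a, int target)
{
	if (!a->members[target].supports_discard) {
		printf("discard to %s ignored, completed as success\n",
		       a->members[target].name);
		return false;
	}
	printf("discard forwarded to %s\n", a->members[target].name);
	return true;
}

int main(void)
{
	struct member disks[] = {
		{ "sda", true },
		{ "sdb", false },
	};
	struct md_array array = { disks, 2, false };

	setup_queue_flags(&array);
	printf("array advertises discard: %s\n",
	       array.discard_supported ? "yes" : "no");

	route_discard(&array, 0);	/* forwarded */
	route_discard(&array, 1);	/* dropped: member lacks discard */
	return 0;
}

Completing the unsupported discard with success mirrors the bio_endio(bio, 0) in the last hunk: a discard is only a hint to the device, so dropping it on a member that cannot honor it is safe.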
