md: update the optimal I/O size on reshape

The raid5 and raid10 drivers currently update the read-ahead size,
but not the optimal I/O size, on reshape.  To prepare for deriving the
read-ahead size from the optimal I/O size, make sure it is updated
as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig authored and Jens Axboe committed Sep 24, 2020
1 parent 55b2598 commit 16ef510
Showing 2 changed files with 22 additions and 10 deletions.
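
As a rough illustration, not part of the change itself: the "deriving the
read-ahead size from the optimal I/O size" mentioned in the message refers to
the relationship visible in the end_reshape() hunks below, where both drivers
size read-ahead to two full data stripes, and one full data stripe in bytes is
exactly the value handed to blk_queue_io_opt(). A minimal userspace sketch of
that arithmetic, using invented example geometry rather than anything from the
commit:

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u         /* assumed 4 KiB pages for the example */

int main(void)
{
        /* Invented example geometry: 512 KiB chunks (1024 sectors), 4 data disks. */
        unsigned int chunk_sectors = 1024;
        unsigned int data_disks = 4;

        /* One full data stripe in bytes: the value given to blk_queue_io_opt(). */
        unsigned int io_opt = (chunk_sectors << 9) * data_disks;

        /* Two full stripes in pages: the read-ahead end_reshape() programs by hand. */
        unsigned int ra_pages = 2 * io_opt / EXAMPLE_PAGE_SIZE;

        printf("io_opt = %u bytes, ra_pages = %u\n", io_opt, ra_pages);
        return 0;
}

With these numbers the sketch prints io_opt = 2097152 bytes and ra_pages = 1024,
the same 2 * stripe value the end_reshape() hunks below compute.
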
22 changes: 14 additions & 8 deletions drivers/md/raid10.c
@@ -3703,10 +3703,20 @@ static struct r10conf *setup_conf(struct mddev *mddev)
         return ERR_PTR(err);
 }
 
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+        int raid_disks = conf->geo.raid_disks;
+
+        if (!(conf->geo.raid_disks % conf->geo.near_copies))
+                raid_disks /= conf->geo.near_copies;
+        blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+                         raid_disks);
+}
+
 static int raid10_run(struct mddev *mddev)
 {
         struct r10conf *conf;
-        int i, disk_idx, chunk_size;
+        int i, disk_idx;
         struct raid10_info *disk;
         struct md_rdev *rdev;
         sector_t size;
@@ -3742,18 +3752,13 @@ static int raid10_run(struct mddev *mddev)
         mddev->thread = conf->thread;
         conf->thread = NULL;
 
-        chunk_size = mddev->chunk_sectors << 9;
         if (mddev->queue) {
                 blk_queue_max_discard_sectors(mddev->queue,
                                               mddev->chunk_sectors);
                 blk_queue_max_write_same_sectors(mddev->queue, 0);
                 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-                blk_queue_io_min(mddev->queue, chunk_size);
-                if (conf->geo.raid_disks % conf->geo.near_copies)
-                        blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
-                else
-                        blk_queue_io_opt(mddev->queue, chunk_size *
-                                         (conf->geo.raid_disks / conf->geo.near_copies));
+                blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+                raid10_set_io_opt(conf);
         }
 
         rdev_for_each(rdev, mddev) {
@@ -4727,6 +4732,7 @@ static void end_reshape(struct r10conf *conf)
                 stripe /= conf->geo.near_copies;
                 if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
                         conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+                raid10_set_io_opt(conf);
         }
         conf->fullsync = 0;
 }
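
As a concrete illustration of what the new raid10_set_io_opt() helper computes,
here is a small standalone sketch of the same arithmetic: the chunk size in
bytes times the data disks per stripe, with near_copies divided out only when
it evenly divides raid_disks, the same condition the old open-coded branch
tested. The geometry values below are invented for the example, not taken from
the commit.

#include <stdio.h>

/* Illustrative userspace mirror of the raid10_set_io_opt() calculation. */
static unsigned int raid10_io_opt_bytes(unsigned int chunk_sectors,
                                        unsigned int raid_disks,
                                        unsigned int near_copies)
{
        unsigned int data_disks = raid_disks;

        /* Divide out near_copies only when it evenly divides raid_disks. */
        if (!(raid_disks % near_copies))
                data_disks /= near_copies;
        return (chunk_sectors << 9) * data_disks;       /* sectors to bytes */
}

int main(void)
{
        /* Example layouts, not from the commit: 512 KiB chunks (1024 sectors). */
        printf("4 disks, near=2: %u bytes\n", raid10_io_opt_bytes(1024, 4, 2));
        printf("3 disks, near=2: %u bytes\n", raid10_io_opt_bytes(1024, 3, 2));
        return 0;
}
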
10 changes: 8 additions & 2 deletions drivers/md/raid5.c
@@ -7232,6 +7232,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
         return 0;
 }
 
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+        blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+                         (conf->raid_disks - conf->max_degraded));
+}
+
 static int raid5_run(struct mddev *mddev)
 {
         struct r5conf *conf;
@@ -7521,8 +7527,7 @@ static int raid5_run(struct mddev *mddev)
 
                 chunk_size = mddev->chunk_sectors << 9;
                 blk_queue_io_min(mddev->queue, chunk_size);
-                blk_queue_io_opt(mddev->queue, chunk_size *
-                                 (conf->raid_disks - conf->max_degraded));
+                raid5_set_io_opt(conf);
                 mddev->queue->limits.raid_partial_stripes_expensive = 1;
                 /*
                  * We can only discard a whole stripe. It doesn't make sense to
@@ -8115,6 +8120,7 @@ static void end_reshape(struct r5conf *conf)
                                                            / PAGE_SIZE);
                         if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
                                 conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+                        raid5_set_io_opt(conf);
                 }
         }
 }
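
Likewise for raid5/raid6, the new raid5_set_io_opt() helper programs one full
stripe of data: the chunk size in bytes times the number of data disks, i.e.
raid_disks minus max_degraded (the parity count). A small standalone sketch of
that arithmetic with invented example values, not taken from the commit:

#include <stdio.h>

/* Illustrative userspace mirror of the raid5_set_io_opt() calculation. */
static unsigned int raid5_io_opt_bytes(unsigned int chunk_sectors,
                                       unsigned int raid_disks,
                                       unsigned int max_degraded)
{
        /* Data disks per stripe = total disks minus parity disks. */
        return (chunk_sectors << 9) * (raid_disks - max_degraded);
}

int main(void)
{
        /* Example layouts, not from the commit: 64 KiB chunks (128 sectors). */
        printf("RAID5, 4 disks: %u bytes\n", raid5_io_opt_bytes(128, 4, 1));
        printf("RAID6, 6 disks: %u bytes\n", raid5_io_opt_bytes(128, 6, 2));
        return 0;
}
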
