Commit 16ef5101 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

md: update the optimal I/O size on reshape



The raid5 and raid10 drivers currently update the read-ahead size,
but not the optimal I/O size on reshape.  To prepare for deriving the
read-ahead size from the optimal I/O size make sure it is updated
as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Song Liu <song@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 55b2598e
Loading
Loading
Loading
Loading
+14 −8
Original line number Diff line number Diff line
@@ -3703,10 +3703,20 @@ static struct r10conf *setup_conf(struct mddev *mddev)
	return ERR_PTR(err);
}

/*
 * Advertise the optimal I/O size for this RAID10 layout: one chunk per
 * data disk.  Near copies reduce the effective number of data disks, but
 * only when they evenly divide the disk count (matching the historical
 * behaviour of the inline computation this helper replaced).
 */
static void raid10_set_io_opt(struct r10conf *conf)
{
	int data_disks = conf->geo.raid_disks;

	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
		data_disks /= conf->geo.near_copies;
	blk_queue_io_opt(conf->mddev->queue,
			 (conf->mddev->chunk_sectors << 9) * data_disks);
}

static int raid10_run(struct mddev *mddev)
{
	struct r10conf *conf;
	int i, disk_idx, chunk_size;
	int i, disk_idx;
	struct raid10_info *disk;
	struct md_rdev *rdev;
	sector_t size;
@@ -3742,18 +3752,13 @@ static int raid10_run(struct mddev *mddev)
	mddev->thread = conf->thread;
	conf->thread = NULL;

	chunk_size = mddev->chunk_sectors << 9;
	if (mddev->queue) {
		blk_queue_max_discard_sectors(mddev->queue,
					      mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, 0);
		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
		blk_queue_io_min(mddev->queue, chunk_size);
		if (conf->geo.raid_disks % conf->geo.near_copies)
			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
		else
			blk_queue_io_opt(mddev->queue, chunk_size *
					 (conf->geo.raid_disks / conf->geo.near_copies));
		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		raid10_set_io_opt(conf);
	}

	rdev_for_each(rdev, mddev) {
@@ -4727,6 +4732,7 @@ static void end_reshape(struct r10conf *conf)
		stripe /= conf->geo.near_copies;
		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
		raid10_set_io_opt(conf);
	}
	conf->fullsync = 0;
}
+8 −2
Original line number Diff line number Diff line
@@ -7232,6 +7232,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
	return 0;
}

/*
 * Advertise the optimal I/O size for this RAID4/5/6 array: a full stripe,
 * i.e. one chunk (in bytes) per data disk, excluding parity disks.
 */
static void raid5_set_io_opt(struct r5conf *conf)
{
	int data_disks = conf->raid_disks - conf->max_degraded;

	blk_queue_io_opt(conf->mddev->queue,
			 (conf->chunk_sectors << 9) * data_disks);
}

static int raid5_run(struct mddev *mddev)
{
	struct r5conf *conf;
@@ -7521,8 +7527,7 @@ static int raid5_run(struct mddev *mddev)

		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));
		raid5_set_io_opt(conf);
		mddev->queue->limits.raid_partial_stripes_expensive = 1;
		/*
		 * We can only discard a whole stripe. It doesn't make sense to
@@ -8115,6 +8120,7 @@ static void end_reshape(struct r5conf *conf)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
			raid5_set_io_opt(conf);
		}
	}
}