Commit af50e20a authored by Jan Kara's avatar Jan Kara Committed by Song Liu
Browse files

md/raid0: Factor out helper for mapping and submitting a bio



Factor out helper function for mapping and submitting a bio out of
raid0_make_request(). We will use it later for submitting both parts of
a split bio.

Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20230814092720.3931-1-jack@suse.cz


Signed-off-by: Song Liu <song@kernel.org>
parent 6b2460e6
Loading
Loading
Loading
Loading
+40 −39
Original line number Diff line number Diff line
@@ -545,54 +545,21 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}
	sector_t bio_sector = bio->bi_iter.bi_sector;
	sector_t sector = bio_sector;

	if (bio->bi_pool != &mddev->bio_set)
		md_account_bio(mddev, &bio);

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
@@ -600,13 +567,13 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
		return;
	}

	if (unlikely(is_rdev_broken(tmp_dev))) {
		bio_io_error(bio);
		md_error(mddev, tmp_dev);
		return true;
		return;
	}

	bio_set_dev(bio, tmp_dev->bdev);
@@ -618,6 +585,40 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
				      bio_sector);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
}

/*
 * raid0_make_request - handle an incoming bio for a raid0 array.
 *
 * Flush and discard requests are diverted to their dedicated handlers.
 * A regular bio that crosses a chunk boundary is split so that the head
 * fits entirely within one chunk; each piece is then mapped to its
 * member device and submitted via raid0_map_submit_bio().
 *
 * Always returns true: the bio has been fully consumed (submitted,
 * handed off, or completed) by the time this returns.
 */
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	sector_t sector;
	unsigned chunk_sects;
	unsigned sectors;

	/* md_flush_request() returns true when it fully handled the flush. */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	sector = bio->bi_iter.bi_sector;
	chunk_sects = mddev->chunk_sectors;

	/*
	 * Sectors remaining in the chunk that contains the start sector.
	 * Fast path uses a mask when the chunk size is a power of two;
	 * otherwise sector_div() computes the in-chunk offset (note it
	 * modifies 'sector' in place, which is fine — 'sector' is not
	 * read again afterwards).
	 */
	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/*
	 * Bio spans a chunk boundary: split off the in-chunk head, chain
	 * it to the original, and resubmit the remainder so it re-enters
	 * this path; only the head is mapped below.
	 */
	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	raid0_map_submit_bio(mddev, bio);
	return true;
}