Commit e82ed3a4 authored by Christoph Hellwig, committed by Jens Axboe

md/raid6: refactor raid5_read_one_chunk



Refactor raid5_read_one_chunk so that all simple checks are done
before allocating the bio.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Song Liu <song@kernel.org>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6a596569
Loading
Loading
Loading
Loading
+45 −63
Original line number Diff line number Diff line
@@ -5393,91 +5393,73 @@ static void raid5_align_endio(struct bio *bi)
static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
{
	struct r5conf *conf = mddev->private;
	int dd_idx;
	struct bio* align_bi;
	struct bio *align_bio;
	struct md_rdev *rdev;
	sector_t end_sector;
	sector_t sector, end_sector, first_bad;
	int bad_sectors, dd_idx;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("%s: non aligned\n", __func__);
		return 0;
	}
	/*
	 * use bio_clone_fast to make a copy of the bio
	 */
	align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
	if (!align_bi)
		return 0;
	/*
	 *   set bi_end_io to a new function, and set bi_private to the
	 *     original bio.
	 */
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 *	compute position
	 */
	align_bi->bi_iter.bi_sector =
		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
				     0, &dd_idx, NULL);

	end_sector = bio_end_sector(align_bi);
	sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
				      &dd_idx, NULL);
	end_sector = bio_end_sector(raid_bio);

	rcu_read_lock();
	if (r5c_big_stripe_cached(conf, sector))
		goto out_rcu_unlock;

	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
	if (!rdev || test_bit(Faulty, &rdev->flags) ||
	    rdev->recovery_offset < end_sector) {
		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
		if (rdev &&
		    (test_bit(Faulty, &rdev->flags) ||
		if (!rdev)
			goto out_rcu_unlock;
		if (test_bit(Faulty, &rdev->flags) ||
		    !(test_bit(In_sync, &rdev->flags) ||
		      rdev->recovery_offset >= end_sector)))
			rdev = NULL;
		      rdev->recovery_offset >= end_sector))
			goto out_rcu_unlock;
	}

	if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) {
	atomic_inc(&rdev->nr_pending);
	rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}

	if (rdev) {
		sector_t first_bad;
		int bad_sectors;
	align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
	bio_set_dev(align_bio, rdev->bdev);
	align_bio->bi_end_io = raid5_align_endio;
	align_bio->bi_private = raid_bio;
	align_bio->bi_iter.bi_sector = sector;

		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
	raid_bio->bi_next = (void *)rdev;
		bio_set_dev(align_bi, rdev->bdev);

		if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
				bio_sectors(align_bi),
				&first_bad, &bad_sectors)) {
			bio_put(align_bi);
	if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
			&bad_sectors)) {
		bio_put(align_bio);
		rdev_dec_pending(rdev, mddev);
		return 0;
	}

	/* No reshape active, so we can trust rdev->data_offset */
		align_bi->bi_iter.bi_sector += rdev->data_offset;
	align_bio->bi_iter.bi_sector += rdev->data_offset;

	spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_quiescent,
				    conf->quiesce == 0,
	wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
			    conf->device_lock);
	atomic_inc(&conf->active_aligned_reads);
	spin_unlock_irq(&conf->device_lock);

	if (mddev->gendisk)
			trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
		trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
				      raid_bio->bi_iter.bi_sector);
		submit_bio_noacct(align_bi);
	submit_bio_noacct(align_bio);
	return 1;
	} else {

out_rcu_unlock:
	rcu_read_unlock();
		bio_put(align_bi);
	return 0;
}
}

static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{