Commit 48332ff2 authored by Jens Axboe

Merge branch 'md-next' of...

Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-5.11/drivers

Pull MD changes from Song:

"Summary:
 1. Fix race condition in md_ioctl(), by Dae R. Jeong;
 2. Initialize read_slot properly for raid10, by Kevin Vigor;
 3. Code cleanup, by Pankaj Gupta;
 4. md-cluster resync/reshape fix, by Zhao Heming."

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md/cluster: fix deadlock when node is doing resync job
  md/cluster: block reshape with remote resync job
  md: use current request time as base for ktime comparisons
  md: add comments in md_flush_request()
  md: improve variable names in md_flush_request()
  md/raid10: initialize r10_bio->read_slot before use.
  md: fix a warning caused by a race between concurrent md_ioctl()s
parents 4d063e64 bca5b065
+38 −29
File: drivers/md/md-cluster.c (inferred from hunk content: lock_token/lock_comm/sendmsg)
@@ -664,9 +664,27 @@ static void recv_daemon(struct md_thread *thread)
 * Takes the lock on the TOKEN lock resource so no other
 * node can communicate while the operation is underway.
 */
static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
static int lock_token(struct md_cluster_info *cinfo)
{
	int error, set_bit = 0;
	int error;

	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
	if (error) {
		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
				__func__, __LINE__, error);
	} else {
		/* Lock the receive sequence */
		mutex_lock(&cinfo->recv_mutex);
	}
	return error;
}

/* lock_comm()
 * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
 */
static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
{
	int rv, set_bit = 0;
	struct mddev *mddev = cinfo->mddev;

	/*
@@ -677,34 +695,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
	 */
	if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
				      &cinfo->state)) {
		error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
		rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
					      &cinfo->state);
		WARN_ON_ONCE(error);
		WARN_ON_ONCE(rv);
		md_wakeup_thread(mddev->thread);
		set_bit = 1;
	}
	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
	if (set_bit)
		clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);

	if (error)
		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
				__func__, __LINE__, error);

	/* Lock the receive sequence */
	mutex_lock(&cinfo->recv_mutex);
	return error;
}

/* lock_comm()
 * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
 */
static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
{
	wait_event(cinfo->wait,
		   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));

	return lock_token(cinfo, mddev_locked);
	rv = lock_token(cinfo);
	if (set_bit)
		clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
	return rv;
}

static void unlock_comm(struct md_cluster_info *cinfo)
@@ -784,9 +787,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
{
	int ret;

	lock_comm(cinfo, mddev_locked);
	ret = lock_comm(cinfo, mddev_locked);
	if (!ret) {
		ret = __sendmsg(cinfo, cmsg);
		unlock_comm(cinfo);
	}
	return ret;
}

@@ -1061,7 +1066,7 @@ static int metadata_update_start(struct mddev *mddev)
		return 0;
	}

	ret = lock_token(cinfo, 1);
	ret = lock_token(cinfo);
	clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
	return ret;
}
@@ -1255,7 +1260,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
	int raid_slot = -1;

	md_update_sb(mddev, 1);
	lock_comm(cinfo, 1);
	if (lock_comm(cinfo, 1)) {
		pr_err("%s: lock_comm failed\n", __func__);
		return;
	}

	memset(&cmsg, 0, sizeof(cmsg));
	cmsg.type = cpu_to_le32(METADATA_UPDATED);
@@ -1407,7 +1415,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
	cmsg.type = cpu_to_le32(NEWDISK);
	memcpy(cmsg.uuid, uuid, 16);
	cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
	lock_comm(cinfo, 1);
	if (lock_comm(cinfo, 1))
		return -EAGAIN;
	ret = __sendmsg(cinfo, &cmsg);
	if (ret) {
		unlock_comm(cinfo);
+23 −10
File: drivers/md/md.c (inferred from hunk content: md_submit_flush_data/md_flush_request/md_ioctl)
@@ -639,7 +639,7 @@ static void md_submit_flush_data(struct work_struct *ws)
	 * could wait for this and below md_handle_request could wait for those
	 * bios because of suspend check
	 */
	mddev->last_flush = mddev->start_flush;
	mddev->prev_flush_start = mddev->start_flush;
	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);

@@ -660,13 +660,17 @@ static void md_submit_flush_data(struct work_struct *ws)
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t start = ktime_get_boottime();
	ktime_t req_start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
	/* flush requests wait until ongoing flush completes,
	 * hence coalescing all the pending requests.
	 */
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio ||
			    ktime_after(mddev->last_flush, start),
			    ktime_before(req_start, mddev->prev_flush_start),
			    mddev->lock);
	if (!ktime_after(mddev->last_flush, start)) {
	/* new request after previous flush is completed */
	if (ktime_after(req_start, mddev->prev_flush_start)) {
		WARN_ON(mddev->flush_bio);
		mddev->flush_bio = bio;
		bio = NULL;
@@ -6949,8 +6953,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
		goto busy;

kick_rdev:
	if (mddev_is_clustered(mddev))
		md_cluster_ops->remove_disk(mddev, rdev);
	if (mddev_is_clustered(mddev)) {
		if (md_cluster_ops->remove_disk(mddev, rdev))
			goto busy;
	}

	md_kick_rdev_from_array(rdev);
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -7279,6 +7285,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
		return -EINVAL;
	if (mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
	    mddev->reshape_position != MaxSector)
		return -EBUSY;

@@ -7589,8 +7596,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
			err = -EBUSY;
			goto out;
		}
		WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
		set_bit(MD_CLOSING, &mddev->flags);
		if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
			mutex_unlock(&mddev->open_mutex);
			err = -EBUSY;
			goto out;
		}
		did_set_md_closing = true;
		mutex_unlock(&mddev->open_mutex);
		sync_blockdev(bdev);
@@ -9660,8 +9670,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
		}
	}

	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
		update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
		ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
		if (ret)
			pr_warn("md: updating array disks failed. %d\n", ret);
	}

	/*
	 * Since mddev->delta_disks has already updated in update_raid_disks,
+3 −3
File: drivers/md/md.h (inferred from hunk content: struct mddev flush fields)
@@ -495,7 +495,7 @@ struct mddev {
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	ktime_t start_flush, last_flush; /* last_flush is when the last completed
	ktime_t start_flush, prev_flush_start; /* prev_flush_start is when the previous completed
						* flush was started.
						*/
	struct work_struct flush_work;
+2 −1
File: drivers/md/raid10.c (inferred from hunk content: raid10_read_request/__make_request)
@@ -1127,7 +1127,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
	struct md_rdev *err_rdev = NULL;
	gfp_t gfp = GFP_NOIO;

	if (r10_bio->devs[slot].rdev) {
	if (slot >= 0 && r10_bio->devs[slot].rdev) {
		/*
		 * This is an error retry, but we cannot
		 * safely dereference the rdev in the r10_bio,
@@ -1508,6 +1508,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_iter.bi_sector;
	r10_bio->state = 0;
	r10_bio->read_slot = -1;
	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->geo.raid_disks);

	if (bio_data_dir(bio) == READ)