Commit 5626196a authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge branch 'md-next' of...

Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-6.2/block

Pull MD fixes from Song.

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md/raid1: stop mdx_raid1 thread when raid1 array run failed
  md/raid5: use bdev_write_cache instead of open coding it
  md: fix a crash in mempool_free
  md/raid0, raid10: Don't set discard sectors for request queue
  md/bitmap: Fix bitmap chunk size overflow issues
  md: introduce md_ro_state
  md: factor out __md_set_array_info()
  lib/raid6: drop RAID6_USE_EMPTY_ZERO_PAGE
  raid5-cache: use try_cmpxchg in r5l_wake_reclaim
  drivers/md/md-bitmap: check the return value of md_bitmap_get_counter()
parents 4f8126bb b611ad14
Loading
Loading
Loading
Loading
+27 −20
Original line number Diff line number Diff line
@@ -486,7 +486,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("       version: %u\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
@@ -497,11 +497,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     chunksize: %u B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

@@ -2105,7 +2105,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
			bytes = DIV_ROUND_UP(chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
		} while (bytes > (space << 9));
		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
	} else
		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;

@@ -2150,7 +2151,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
	bitmap->counts.missing_pages = pages;
	bitmap->counts.chunkshift = chunkshift;
	bitmap->counts.chunks = chunks;
	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
	bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
						     BITMAP_BLOCK_SHIFT);

	blocks = min(old_counts.chunks << old_counts.chunkshift,
@@ -2176,8 +2177,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
				bitmap->counts.missing_pages = old_counts.pages;
				bitmap->counts.chunkshift = old_counts.chunkshift;
				bitmap->counts.chunks = old_counts.chunks;
				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
									     BITMAP_BLOCK_SHIFT);
				bitmap->mddev->bitmap_info.chunksize =
					1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
				blocks = old_counts.chunks << old_counts.chunkshift;
				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
				break;
@@ -2195,10 +2196,12 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,

		if (set) {
			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (bmc_new) {
				if (*bmc_new == 0) {
					/* need to set on-disk bits too. */
					sector_t end = block + new_blocks;
					sector_t start = block >> chunkshift;

					start <<= chunkshift;
					while (start < end) {
						md_bitmap_file_set_bit(bitmap, block);
@@ -2209,6 +2212,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
					md_bitmap_set_pending(&bitmap->counts, block);
				}
				*bmc_new |= NEEDED_MASK;
			}
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
@@ -2534,6 +2538,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len)
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
		sizeof(((bitmap_super_t *)0)->chunksize))))
		return -EOVERFLOW;
	mddev->bitmap_info.chunksize = csize;
	return len;
}
+123 −103
Original line number Diff line number Diff line
@@ -93,6 +93,18 @@ static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Array read/write state kept in mddev->ro.  These named states replace
 * the bare 0/1/2 magic values previously compared against mddev->ro.
 */
enum md_ro_state {
	MD_RDWR,	/* array accepts writes (was mddev->ro == 0) */
	MD_RDONLY,	/* array is read-only (was mddev->ro == 1) */
	MD_AUTO_READ,	/* read-only, but switches to read-write on first write (was == 2) */
	MD_MAX_STATE
};

/*
 * Return true when the array is in full read-write mode.  Note this is
 * false for MD_AUTO_READ, even though a first write promotes that state
 * to MD_RDWR (see md_write_start()).
 */
static bool md_is_rdwr(struct mddev *mddev)
{
	return (mddev->ro == MD_RDWR);
}

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
@@ -444,7 +456,7 @@ static void md_submit_bio(struct bio *bio)

	bio = bio_split_to_limits(bio);

	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
	if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
@@ -509,13 +521,14 @@ static void md_end_flush(struct bio *bio)
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	bio_put(bio);

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);
@@ -913,10 +926,12 @@ static void super_written(struct bio *bio)
	} else
		clear_bit(LastDev, &rdev->flags);

	bio_put(bio);

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
@@ -2639,7 +2654,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
	int any_badblocks_changed = 0;
	int ret = -1;

	if (mddev->ro) {
	if (!md_is_rdwr(mddev)) {
		if (force_change)
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		return;
@@ -3901,7 +3916,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
		goto out_unlock;
	}
	rv = -EROFS;
	if (mddev->ro)
	if (!md_is_rdwr(mddev))
		goto out_unlock;

	/* request to change the personality.  Need to ensure:
@@ -4107,7 +4122,7 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
	if (mddev->pers) {
		if (mddev->pers->check_reshape == NULL)
			err = -EBUSY;
		else if (mddev->ro)
		else if (!md_is_rdwr(mddev))
			err = -EROFS;
		else {
			mddev->new_layout = n;
@@ -4216,7 +4231,7 @@ chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
	if (mddev->pers) {
		if (mddev->pers->check_reshape == NULL)
			err = -EBUSY;
		else if (mddev->ro)
		else if (!md_is_rdwr(mddev))
			err = -EROFS;
		else {
			mddev->new_chunk_sectors = n >> 9;
@@ -4339,13 +4354,13 @@ array_state_show(struct mddev *mddev, char *page)

	if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
		switch(mddev->ro) {
		case 1:
		case MD_RDONLY:
			st = readonly;
			break;
		case 2:
		case MD_AUTO_READ:
			st = read_auto;
			break;
		case 0:
		case MD_RDWR:
			spin_lock(&mddev->lock);
			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
				st = write_pending;
@@ -4381,7 +4396,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
	int err = 0;
	enum array_state st = match_word(buf, array_states);

	if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
	if (mddev->pers && (st == active || st == clean) &&
	    mddev->ro != MD_RDONLY) {
		/* don't take reconfig_mutex when toggling between
		 * clean and active
		 */
@@ -4425,23 +4441,23 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
		if (mddev->pers)
			err = md_set_readonly(mddev, NULL);
		else {
			mddev->ro = 1;
			mddev->ro = MD_RDONLY;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
			if (md_is_rdwr(mddev))
				err = md_set_readonly(mddev, NULL);
			else if (mddev->ro == 1)
			else if (mddev->ro == MD_RDONLY)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				mddev->ro = MD_AUTO_READ;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			mddev->ro = MD_AUTO_READ;
			err = do_md_run(mddev);
		}
		break;
@@ -4466,7 +4482,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			mddev->ro = MD_RDWR;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
@@ -4765,7 +4781,7 @@ action_show(struct mddev *mddev, char *page)
	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
	    (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
@@ -4851,11 +4867,11 @@ action_store(struct mddev *mddev, const char *page, size_t len)
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
	if (mddev->ro == MD_AUTO_READ) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		mddev->ro = MD_RDWR;
		md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5083,8 +5099,7 @@ max_sync_store(struct mddev *mddev, const char *buf, size_t len)
			goto out_unlock;

		err = -EBUSY;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		if (max < mddev->resync_max && md_is_rdwr(mddev) &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			goto out_unlock;

@@ -5813,8 +5828,8 @@ int md_run(struct mddev *mddev)
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);
		if (mddev->ro != 1 && rdev_read_only(rdev)) {
			mddev->ro = 1;
		if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
			mddev->ro = MD_RDONLY;
			if (mddev->gendisk)
				set_disk_ro(mddev->gendisk, 1);
		}
@@ -5917,8 +5932,8 @@ int md_run(struct mddev *mddev)

	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly && mddev->ro == 0)
		mddev->ro = 2; /* read-only, but switch on first write */
	if (start_readonly && md_is_rdwr(mddev))
		mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */

	err = pers->run(mddev);
	if (err)
@@ -5996,8 +6011,8 @@ int md_run(struct mddev *mddev)
		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
		mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
		mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;
	} else if (mddev->ro == MD_AUTO_READ)
		mddev->ro = MD_RDWR;

	atomic_set(&mddev->max_corr_read_errors,
		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
@@ -6015,7 +6030,7 @@ int md_run(struct mddev *mddev)
		if (rdev->raid_disk >= 0)
			sysfs_link_rdev(mddev, rdev); /* failure here is OK */

	if (mddev->degraded && !mddev->ro)
	if (mddev->degraded && md_is_rdwr(mddev))
		/* This ensures that recovering status is reported immediately
		 * via sysfs - until a lack of spares is confirmed.
		 */
@@ -6105,7 +6120,7 @@ static int restart_array(struct mddev *mddev)
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
	if (md_is_rdwr(mddev))
		return -EBUSY;

	rcu_read_lock();
@@ -6124,7 +6139,7 @@ static int restart_array(struct mddev *mddev)
		return -EROFS;

	mddev->safemode = 0;
	mddev->ro = 0;
	mddev->ro = MD_RDWR;
	set_disk_ro(disk, 0);
	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
	/* Kick recovery or resync if necessary */
@@ -6151,7 +6166,7 @@ static void md_clean(struct mddev *mddev)
	mddev->clevel[0] = 0;
	mddev->flags = 0;
	mddev->sb_flags = 0;
	mddev->ro = 0;
	mddev->ro = MD_RDWR;
	mddev->metadata_type[0] = 0;
	mddev->chunk_sectors = 0;
	mddev->ctime = mddev->utime = 0;
@@ -6203,7 +6218,7 @@ static void __md_stop_writes(struct mddev *mddev)
	}
	md_bitmap_flush(mddev);

	if (mddev->ro == 0 &&
	if (md_is_rdwr(mddev) &&
	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
	     mddev->sb_flags)) {
		/* mark array as shutdown cleanly */
@@ -6312,9 +6327,9 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
		__md_stop_writes(mddev);

		err  = -ENXIO;
		if (mddev->ro==1)
		if (mddev->ro == MD_RDONLY)
			goto out;
		mddev->ro = 1;
		mddev->ro = MD_RDONLY;
		set_disk_ro(mddev->gendisk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -6371,7 +6386,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
		return -EBUSY;
	}
	if (mddev->pers) {
		if (mddev->ro)
		if (!md_is_rdwr(mddev))
			set_disk_ro(disk, 0);

		__md_stop_writes(mddev);
@@ -6388,8 +6403,8 @@ static int do_md_stop(struct mddev *mddev, int mode,
		mutex_unlock(&mddev->open_mutex);
		mddev->changed = 1;

		if (mddev->ro)
			mddev->ro = 0;
		if (!md_is_rdwr(mddev))
			mddev->ro = MD_RDWR;
	} else
		mutex_unlock(&mddev->open_mutex);
	/*
@@ -7204,7 +7219,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    mddev->sync_thread)
		return -EBUSY;
	if (mddev->ro)
	if (!md_is_rdwr(mddev))
		return -EROFS;

	rdev_for_each(rdev, mddev) {
@@ -7234,7 +7249,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (mddev->ro)
	if (!md_is_rdwr(mddev))
		return -EROFS;
	if (raid_disks <= 0 ||
	    (mddev->max_disks && raid_disks >= mddev->max_disks))
@@ -7464,6 +7479,40 @@ static inline bool md_ioctl_valid(unsigned int cmd)
	}
}

/*
 * Handle the SET_ARRAY_INFO ioctl payload: update the info of an already
 * running array, or record the requested geometry for an array that has
 * not been assembled yet.  A NULL @argp means "use an all-zero info
 * structure".  Returns 0 on success or a negative errno.
 * NOTE(review): called from md_ioctl() inside its lock/unlock section, so
 * the mddev appears to be locked here — confirm against the caller.
 */
static int __md_set_array_info(struct mddev *mddev, void __user *argp)
{
	mdu_array_info_t info;
	int err;

	if (argp) {
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
	} else {
		memset(&info, 0, sizeof(info));
	}

	/* Running array: forward the request to update_array_info(). */
	if (mddev->pers) {
		err = update_array_info(mddev, &info);
		if (err)
			pr_warn("md: couldn't update array info. %d\n", err);
		return err;
	}

	/* Not assembled yet: refuse if anything is already configured. */
	if (!list_empty(&mddev->disks)) {
		pr_warn("md: array %s already has disks!\n", mdname(mddev));
		return -EBUSY;
	}
	if (mddev->raid_disks) {
		pr_warn("md: array %s already initialised!\n", mdname(mddev));
		return -EBUSY;
	}

	err = md_set_array_info(mddev, &info);
	if (err)
		pr_warn("md: couldn't set array info. %d\n", err);

	return err;
}

static int md_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
@@ -7569,36 +7618,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
	}

	if (cmd == SET_ARRAY_INFO) {
		mdu_array_info_t info;
		if (!arg)
			memset(&info, 0, sizeof(info));
		else if (copy_from_user(&info, argp, sizeof(info))) {
			err = -EFAULT;
			goto unlock;
		}
		if (mddev->pers) {
			err = update_array_info(mddev, &info);
			if (err) {
				pr_warn("md: couldn't update array info. %d\n", err);
				goto unlock;
			}
			goto unlock;
		}
		if (!list_empty(&mddev->disks)) {
			pr_warn("md: array %s already has disks!\n", mdname(mddev));
			err = -EBUSY;
			goto unlock;
		}
		if (mddev->raid_disks) {
			pr_warn("md: array %s already initialised!\n", mdname(mddev));
			err = -EBUSY;
			goto unlock;
		}
		err = md_set_array_info(mddev, &info);
		if (err) {
			pr_warn("md: couldn't set array info. %d\n", err);
			goto unlock;
		}
		err = __md_set_array_info(mddev, argp);
		goto unlock;
	}

@@ -7658,9 +7678,12 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 */
	if (mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
	if (!md_is_rdwr(mddev) && mddev->pers) {
		if (mddev->ro != MD_AUTO_READ) {
			err = -EROFS;
			goto unlock;
		}
		mddev->ro = MD_RDWR;
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* mddev_unlock will wake thread */
@@ -7674,10 +7697,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
				   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
			mddev_lock_nointr(mddev);
		}
		} else {
			err = -EROFS;
			goto unlock;
		}
	}

	switch (cmd) {
@@ -7763,11 +7782,11 @@ static int md_set_read_only(struct block_device *bdev, bool ro)
	 * Transitioning to read-auto need only happen for arrays that call
	 * md_write_start and which are not ready for writes yet.
	 */
	if (!ro && mddev->ro == 1 && mddev->pers) {
	if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
		err = restart_array(mddev);
		if (err)
			goto out_unlock;
		mddev->ro = 2;
		mddev->ro = MD_AUTO_READ;
	}

out_unlock:
@@ -8241,9 +8260,9 @@ static int md_seq_show(struct seq_file *seq, void *v)
		seq_printf(seq, "%s : %sactive", mdname(mddev),
						mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
			if (mddev->ro == MD_RDONLY)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
			if (mddev->ro == MD_AUTO_READ)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}
@@ -8502,10 +8521,10 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
	if (bio_data_dir(bi) != WRITE)
		return true;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
	BUG_ON(mddev->ro == MD_RDONLY);
	if (mddev->ro == MD_AUTO_READ) {
		/* need to switch to read/write */
		mddev->ro = 0;
		mddev->ro = MD_RDWR;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
@@ -8556,7 +8575,7 @@ void md_write_inc(struct mddev *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;
	WARN_ON_ONCE(mddev->in_sync || mddev->ro);
	WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
	percpu_ref_get(&mddev->writes_pending);
}
EXPORT_SYMBOL(md_write_inc);
@@ -8661,7 +8680,7 @@ void md_allow_write(struct mddev *mddev)
{
	if (!mddev->pers)
		return;
	if (mddev->ro)
	if (!md_is_rdwr(mddev))
		return;
	if (!mddev->pers->sync_request)
		return;
@@ -8709,7 +8728,7 @@ void md_do_sync(struct md_thread *thread)
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
		return;
	if (mddev->ro) {/* never try to sync a read-only array */
	if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return;
	}
@@ -9178,7 +9197,7 @@ static int remove_and_add_spares(struct mddev *mddev,
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(Journal, &rdev->flags)) {
			if (mddev->ro &&
			if (!md_is_rdwr(mddev) &&
			    !(rdev->saved_raid_disk >= 0 &&
			      !test_bit(Bitmap_sync, &rdev->flags)))
				continue;
@@ -9278,7 +9297,8 @@ void md_check_recovery(struct mddev *mddev)
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
	if (!md_is_rdwr(mddev) &&
	    !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
@@ -9297,7 +9317,7 @@ void md_check_recovery(struct mddev *mddev)
		if (!mddev->external && mddev->safemode == 1)
			mddev->safemode = 0;

		if (mddev->ro) {
		if (!md_is_rdwr(mddev)) {
			struct md_rdev *rdev;
			if (!mddev->external && mddev->in_sync)
				/* 'Blocked' flag not needed as failed devices
+0 −1
Original line number Diff line number Diff line
@@ -398,7 +398,6 @@ static int raid0_run(struct mddev *mddev)

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
+1 −0
Original line number Diff line number Diff line
@@ -3159,6 +3159,7 @@ static int raid1_run(struct mddev *mddev)
	 * RAID1 needs at least one disk in active
	 */
	if (conf->raid_disks - mddev->degraded < 1) {
		md_unregister_thread(&conf->thread);
		ret = -EINVAL;
		goto abort;
	}
+0 −2
Original line number Diff line number Diff line
@@ -4145,8 +4145,6 @@ static int raid10_run(struct mddev *mddev)
	conf->thread = NULL;

	if (mddev->queue) {
		blk_queue_max_discard_sectors(mddev->queue,
					      UINT_MAX);
		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		raid10_set_io_opt(conf);
Loading