Commit a2daeab5 authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-5.18/drivers

Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-5.18/drivers

Pull MD fixes from Song:

"Most of these changes are minor fixes and clean-ups."

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md: use msleep() in md_notify_reboot()
  lib/raid6: Include <asm/ppc-opcode.h> for VPERMXOR
  lib/raid6/test/Makefile: Use $(pound) instead of \# for Make 4.3
  lib/raid6/test: fix multiple definition linking error
  md: raid1/raid10: drop pending_cnt
parents a7637069 7d959f6e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -9582,7 +9582,7 @@ static int md_notify_reboot(struct notifier_block *this,
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);
		msleep(1000);

	return NOTIFY_DONE;
}
+5 −0
Original line number Diff line number Diff line
@@ -28,6 +28,11 @@ struct resync_pages {
	struct page	*pages[RESYNC_PAGES];
};

/*
 * Per-task blk-plug context shared by raid1 and raid10 write paths.
 * Embedded blk_plug_cb first so container_of() can recover this struct
 * from the callback pointer handed back by blk_check_plugged().
 * NOTE(review): after this patch raid10 reuses this struct too (see the
 * raid10.c hunks below) — the former per-bio pending_cnt counter is gone.
 */
struct raid1_plug_cb {
	struct blk_plug_cb	cb;	/* generic block-layer plug callback */
	struct bio_list		pending;	/* writes queued until unplug */
};

/*
 * Free one pool element; presumably a mempool_free_t callback for the
 * r1bio/r10bio mempool (TODO confirm against mempool_init() caller).
 * @data is the pool's private pointer and is unused here.
 */
static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}
+0 −11
Original line number Diff line number Diff line
@@ -824,7 +824,6 @@ static void flush_pending_writes(struct r1conf *conf)
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
@@ -1167,12 +1166,6 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
	bio_put(behind_bio);
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
@@ -1184,7 +1177,6 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
@@ -1588,11 +1580,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
			plug = NULL;
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			spin_lock_irqsave(&conf->device_lock, flags);
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
			spin_unlock_irqrestore(&conf->device_lock, flags);
			md_wakeup_thread(mddev->thread);
		}
@@ -3058,7 +3048,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
+0 −1
Original line number Diff line number Diff line
@@ -87,7 +87,6 @@ struct r1conf {

	/* queue pending writes to be submitted on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	/* for use when syncing mirrors:
	 * We don't allow both normal IO and resync/recovery IO at
+3 −14
Original line number Diff line number Diff line
@@ -861,7 +861,6 @@ static void flush_pending_writes(struct r10conf *conf)
		struct bio *bio;

		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
@@ -1054,16 +1053,9 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
		return rdev->new_data_offset;
}

struct raid10_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
						   cb);
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
	struct mddev *mddev = plug->cb.data;
	struct r10conf *conf = mddev->private;
	struct bio *bio;
@@ -1071,7 +1063,6 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
@@ -1238,7 +1229,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
	unsigned long flags;
	struct blk_plug_cb *cb;
	struct raid10_plug_cb *plug = NULL;
	struct raid1_plug_cb *plug = NULL;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev;
	int devnum = r10_bio->devs[n_copy].devnum;
@@ -1280,16 +1271,14 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,

	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
	if (cb)
		plug = container_of(cb, struct raid10_plug_cb, cb);
		plug = container_of(cb, struct raid1_plug_cb, cb);
	else
		plug = NULL;
	if (plug) {
		bio_list_add(&plug->pending, mbio);
		plug->pending_cnt++;
	} else {
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		conf->pending_count++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		md_wakeup_thread(mddev->thread);
	}
Loading