Unverified Commit 7d0404f2 authored by openeuler-ci-bot, committed by Gitee

!4119 [sync] PR-3975: md/raid1-10: limit the number of plugged bio

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/3975 
 
PR sync from: Li Lingfeng <lilingfeng3@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/YOSNV6J3UWQ7BBXALQTVYFDK7N7DNKHI/ 
Too many plugged bios can hurt IO performance and consume too much
memory, so limit their number.
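
For reference, the flush rule the series adds in raid1-10.c (last hunk below) boils down to counter arithmetic: once a plug holds MAX_PLUG_BIO bios per copy, it is flushed immediately instead of waiting for the plug to unwind. A minimal standalone model of that rule (illustrative names, not kernel code):

#include <stdio.h>

#define MAX_PLUG_BIO 32

/* Mirrors the kernel check `++plug->count / MAX_PLUG_BIO >= copies`:
 * flush once `copies * MAX_PLUG_BIO` bios have been plugged. */
static int plug_should_flush(unsigned int *count, int copies)
{
	return ++*count / MAX_PLUG_BIO >= copies;
}

int main(void)
{
	unsigned int count = 0;
	int copies = 2;	/* e.g. a two-device RAID1 mirror */

	for (int i = 0; i < 200; i++) {
		if (plug_should_flush(&count, copies)) {
			printf("flush after %u plugged bios\n", count);
			count = 0;
		}
	}
	return 0;	/* prints "flush after 64 plugged bios" three times */
}

With copies = 2 the flush fires every 64 plugged bios, which bounds both the memory held by the plug and the latency of queued writes.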

Li Lingfeng (1):
  Revert "md/raid10: fix softlockup in raid10_unplug"

Mariusz Tkaczyk (2):
  md: drop queue limitation for RAID1 and RAID10
  md: raid1/raid10: drop pending_cnt

Yu Kuai (8):
  md/raid10: prevent soft lockup while flush writes
  md/raid1-10: factor out a helper to add bio to plug
  md/raid1-10: factor out a helper to submit normal write
  md/raid1-10: submit write io directly if bitmap is not enabled
  md/md-bitmap: add a new helper to unplug bitmap asynchrously
  md/raid1-10: don't handle pluged bio by daemon thread
  md/raid1-10: limit the number of plugged bio
  md/raid1-10: fix casting from randomized structure in
    raid1_submit_write()


-- 
2.31.1
 
https://gitee.com/src-openeuler/kernel/issues/I8UKFJ 
 
Link: https://gitee.com/openeuler/kernel/pulls/4119

 

Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents aa21e010 83df3570
drivers/md/md-bitmap.c  +30 −3
@@ -1001,7 +1001,6 @@ static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
 	return set;
 }
 
-
 /* this gets called when the md device is ready to unplug its underlying
  * (slave) device queues -- before we let any writes go down, we need to
  * sync the dirty pages of the bitmap file to disk */
@@ -1011,8 +1010,7 @@ void md_bitmap_unplug(struct bitmap *bitmap)
 	int dirty, need_write;
 	int writing = 0;
 
-	if (!bitmap || !bitmap->storage.filemap ||
-	    test_bit(BITMAP_STALE, &bitmap->flags))
+	if (!md_bitmap_enabled(bitmap))
 		return;
 
 	/* look at each page to see if there are any set bits that need to be
@@ -1041,6 +1039,35 @@ void md_bitmap_unplug(struct bitmap *bitmap)
 }
 EXPORT_SYMBOL(md_bitmap_unplug);
 
+struct bitmap_unplug_work {
+	struct work_struct work;
+	struct bitmap *bitmap;
+	struct completion *done;
+};
+
+static void md_bitmap_unplug_fn(struct work_struct *work)
+{
+	struct bitmap_unplug_work *unplug_work =
+		container_of(work, struct bitmap_unplug_work, work);
+
+	md_bitmap_unplug(unplug_work->bitmap);
+	complete(unplug_work->done);
+}
+
+void md_bitmap_unplug_async(struct bitmap *bitmap)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct bitmap_unplug_work unplug_work;
+
+	INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn);
+	unplug_work.bitmap = bitmap;
+	unplug_work.done = &done;
+
+	queue_work(md_bitmap_wq, &unplug_work.work);
+	wait_for_completion(&done);
+}
+EXPORT_SYMBOL(md_bitmap_unplug_async);
+
 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
 /* * bitmap_init_from_disk -- called at bitmap_create time to initialize
  * the in-memory bitmap from the on-disk bitmap -- also, sets up the
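
md_bitmap_unplug_async() above hands the flush to md_bitmap_wq but still blocks the caller on an on-stack completion: the unplug stays synchronous from the caller's point of view while running outside the caller's submit context. A userspace sketch of that hand-off pattern, assuming only POSIX threads (all names here are illustrative, not the kernel API):

#include <pthread.h>
#include <stdio.h>

struct unplug_work_model {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;	/* stand-in for struct completion */
};

static void *unplug_worker(void *arg)
{
	struct unplug_work_model *w = arg;

	puts("worker: flushing dirty bitmap pages");	/* md_bitmap_unplug() */

	pthread_mutex_lock(&w->lock);
	w->done = 1;					/* complete(done) */
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	struct unplug_work_model w = { .done = 0 };
	pthread_t tid;

	pthread_mutex_init(&w.lock, NULL);
	pthread_cond_init(&w.cond, NULL);

	/* queue_work(md_bitmap_wq, &unplug_work.work) */
	pthread_create(&tid, NULL, unplug_worker, &w);

	/* wait_for_completion(&done) */
	pthread_mutex_lock(&w.lock);
	while (!w.done)
		pthread_cond_wait(&w.cond, &w.lock);
	pthread_mutex_unlock(&w.lock);

	pthread_join(&tid, NULL);
	puts("caller: unplug finished");
	return 0;
}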
drivers/md/md-bitmap.h  +8 −0
@@ -264,6 +264,7 @@ void md_bitmap_sync_with_cluster(struct mddev *mddev,
 				 sector_t new_lo, sector_t new_hi);
 
 void md_bitmap_unplug(struct bitmap *bitmap);
+void md_bitmap_unplug_async(struct bitmap *bitmap);
 void md_bitmap_daemon_work(struct mddev *mddev);
 
 int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
@@ -273,6 +274,13 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
 			     sector_t *lo, sector_t *hi, bool clear_bits);
 void md_bitmap_free(struct bitmap *bitmap);
 void md_bitmap_wait_behind_writes(struct mddev *mddev);
+
+static inline bool md_bitmap_enabled(struct bitmap *bitmap)
+{
+	return bitmap && bitmap->storage.filemap &&
+	       !test_bit(BITMAP_STALE, &bitmap->flags);
+}
+
 #endif
 
 #endif
drivers/md/md.c  +9 −0
@@ -82,6 +82,7 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
 static struct workqueue_struct *md_wq;
 static struct workqueue_struct *md_misc_wq;
 static struct workqueue_struct *md_rdev_misc_wq;
+struct workqueue_struct *md_bitmap_wq;
 
 static int remove_and_add_spares(struct mddev *mddev,
 				 struct md_rdev *this);
@@ -9695,6 +9696,11 @@ static int __init md_init(void)
 	if (!md_rdev_misc_wq)
 		goto err_rdev_misc_wq;
 
+	md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
+				       0);
+	if (!md_bitmap_wq)
+		goto err_bitmap_wq;
+
 	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
 		goto err_md;
 
@@ -9716,6 +9722,8 @@ static int __init md_init(void)
 err_mdp:
 	unregister_blkdev(MD_MAJOR, "md");
 err_md:
+	destroy_workqueue(md_bitmap_wq);
+err_bitmap_wq:
 	destroy_workqueue(md_rdev_misc_wq);
 err_rdev_misc_wq:
 	destroy_workqueue(md_misc_wq);
@@ -10011,6 +10019,7 @@ static __exit void md_exit(void)
 	}
 	destroy_workqueue(md_rdev_misc_wq);
 	destroy_workqueue(md_misc_wq);
+	destroy_workqueue(md_bitmap_wq);
 	destroy_workqueue(md_wq);
 }
 
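The err_* labels extended above follow the kernel's usual goto-unwinding convention: each label releases everything acquired before the step that failed, in reverse order, so a new allocation costs one new label and one new cleanup call. A stripped-down model of the idiom (plain heap blocks standing in for the real workqueues; names are illustrative):

#include <stdlib.h>

static int init_model(void)
{
	char *misc_wq, *bitmap_wq, *blkdev;

	misc_wq = malloc(16);		/* md_misc_wq */
	if (!misc_wq)
		goto err_misc_wq;

	bitmap_wq = malloc(16);		/* md_bitmap_wq */
	if (!bitmap_wq)
		goto err_bitmap_wq;

	blkdev = malloc(16);		/* register_blkdev() */
	if (!blkdev)
		goto err_md;

	/* success: in md_init() these stay live until md_exit();
	 * freed here only to keep the model leak-free */
	free(blkdev);
	free(bitmap_wq);
	free(misc_wq);
	return 0;

err_md:
	free(bitmap_wq);		/* destroy_workqueue(md_bitmap_wq) */
err_bitmap_wq:
	free(misc_wq);			/* destroy_workqueue(md_misc_wq) */
err_misc_wq:
	return -1;
}

int main(void)
{
	return init_model() ? EXIT_FAILURE : EXIT_SUCCESS;
}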
drivers/md/md.h  +1 −0
@@ -839,6 +839,7 @@ struct mdu_array_info_s;
 struct mdu_disk_info_s;
 
 extern int mdp_major;
+extern struct workqueue_struct *md_bitmap_wq;
 void md_autostart_arrays(int part);
 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
drivers/md/raid1-10.c  +68 −6
@@ -21,12 +21,7 @@
 #define IO_MADE_GOOD ((struct bio *)2)
 
 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-
-/* When there are this many requests queue to be written by
- * the raid thread, we become 'congested' to provide back-pressure
- * for writeback.
- */
-static int max_queued_requests = 1024;
+#define MAX_PLUG_BIO 32
 
 /* for managing resync I/O pages */
 struct resync_pages {
@@ -34,6 +29,12 @@ struct resync_pages {
 	struct page	*pages[RESYNC_PAGES];
 };
 
+struct raid1_plug_cb {
+	struct blk_plug_cb	cb;
+	struct bio_list		pending;
+	unsigned int		count;
+};
+
 static void rbio_pool_free(void *rbio, void *data)
 {
 	kfree(rbio);
@@ -110,3 +111,64 @@ static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
 		size -= len;
 	} while (idx++ < RESYNC_PAGES && size > 0);
 }
+
+
+static inline void raid1_submit_write(struct bio *bio)
+{
+	struct md_rdev *rdev = (void *)bio->bi_disk;
+
+	bio->bi_next = NULL;
+	bio_set_dev(bio, rdev->bdev);
+	if (test_bit(Faulty, &rdev->flags))
+		bio_io_error(bio);
+	else if (unlikely(bio_op(bio) ==  REQ_OP_DISCARD &&
+			  !blk_queue_discard(bio->bi_disk->queue)))
+		/* Just ignore it */
+		bio_endio(bio);
+	else
+		submit_bio_noacct(bio);
+}
+
+static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
+				      blk_plug_cb_fn unplug, int copies)
+{
+	struct raid1_plug_cb *plug = NULL;
+	struct blk_plug_cb *cb;
+
+	/*
+	 * If bitmap is not enabled, it's safe to submit the io directly, and
+	 * this can get optimal performance.
+	 */
+	if (!md_bitmap_enabled(mddev->bitmap)) {
+		raid1_submit_write(bio);
+		return true;
+	}
+
+	cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
+	if (!cb)
+		return false;
+
+	plug = container_of(cb, struct raid1_plug_cb, cb);
+	bio_list_add(&plug->pending, bio);
+	if (++plug->count / MAX_PLUG_BIO >= copies) {
+		list_del(&cb->list);
+		cb->callback(cb, false);
+	}
+
+
+	return true;
+}
+
+/*
+ * current->bio_list will be set under submit_bio() context, in this case bitmap
+ * io will be added to the list and wait for current io submission to finish,
+ * while current io submission must wait for bitmap io to be done. In order to
+ * avoid such deadlock, submit bitmap io asynchronously.
+ */
+static inline void raid1_prepare_flush_writes(struct bitmap *bitmap)
+{
+	if (current->bio_list)
+		md_bitmap_unplug_async(bitmap);
+	else
+		md_bitmap_unplug(bitmap);
+}