Commit ec211631 authored by Ming Lei, committed by Mike Snitzer

dm: put all polled dm_io instances into a single list



Now that bio_split() isn't used by DM's bio splitting, it is a bit
overkill to link dm_io into an hlist given there is only a single dm_io
in the list.

Convert to using a single list for holding all dm_io instances
associated with this bio.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 0f14d60a
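
The change below boils down to one pattern: thread every dm_io spawned for a bio through a single pointer-sized field (bio->bi_private) as a singly-linked list, stashing the field's original value in the first entry so it can be restored later. What follows is a minimal userspace sketch of that pattern, not DM code; toy_bio, toy_io and poll_list_push are made-up names standing in for bio, dm_io and dm_queue_poll_io.

/*
 * Minimal sketch: reuse one pointer-sized field of the parent request as
 * the head of a singly-linked list of per-split I/O records, and save the
 * field's original value in the records so it can be restored later.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_bio {
	bool	polled;		/* stands in for REQ_DM_POLL_LIST */
	void	*bi_private;	/* reused as the list head once polled */
};

struct toy_io {
	void		*data;	/* saved original ->bi_private */
	struct toy_io	*next;	/* single list link (was an hlist_node) */
};

/* bio->bi_private doubles as the head of the toy_io list. */
static struct toy_io **poll_list_head(struct toy_bio *bio)
{
	return (struct toy_io **)&bio->bi_private;
}

static void poll_list_push(struct toy_bio *bio, struct toy_io *io)
{
	struct toy_io **head = poll_list_head(bio);

	if (!bio->polled) {
		/* first entry: save the real ->bi_private before reuse */
		bio->polled = true;
		io->data = bio->bi_private;
		io->next = NULL;
	} else {
		/* later entries inherit the saved pointer and chain on */
		io->data = (*head)->data;
		io->next = *head;
	}
	*head = io;
}

int main(void)
{
	int cookie = 0;			/* pretend original bi_private */
	struct toy_bio bio = { .polled = false, .bi_private = &cookie };
	struct toy_io a = { 0 }, b = { 0 };

	poll_list_push(&bio, &a);
	poll_list_push(&bio, &b);

	/* list order is LIFO: b -> a; original pointer is recoverable */
	assert(*poll_list_head(&bio) == &b);
	assert(b.next == &a && a.next == NULL);
	assert(a.data == &cookie && b.data == &cookie);
	return 0;
}

The real code additionally gates the first push on the REQ_DM_POLL_LIST flag and sets bi_cookie so the block layer knows to poll, as the dm.c hunks below show.
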
drivers/md/dm-core.h  +1 −1
@@ -259,7 +259,7 @@ struct dm_io {
 	spinlock_t lock;
 	unsigned long start_time;
 	void *data;
-	struct hlist_node node;
+	struct dm_io *next;
 	struct task_struct *map_task;
 	struct dm_stats_aux stats_aux;
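
The dm-core.h change above is where the commit message's "overkill" point lands: an embedded struct hlist_node carries two pointers (next and pprev), while the new struct dm_io *next link carries one, so each dm_io shrinks by a pointer and the list head still fits in the single pointer-sized bi_private slot. A rough standalone illustration (plain C with made-up struct names, not kernel code):

#include <stdio.h>

/* shaped like the kernel's hlist_node: forward pointer plus back-pointer */
struct hlist_node_like { void *next; void **pprev; };
/* shaped like the new dm_io link: forward pointer only */
struct single_link { void *next; };

int main(void)
{
	printf("hlist link: %zu bytes, single link: %zu bytes\n",
	       sizeof(struct hlist_node_like), sizeof(struct single_link));
	return 0;
}
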

drivers/md/dm.c  +27 −25
@@ -1559,7 +1559,7 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 }
 
 /*
- * Reuse ->bi_private as hlist head for storing all dm_io instances
+ * Reuse ->bi_private as dm_io list head for storing all dm_io instances
  * associated with this bio, and this bio's bi_private needs to be
  * stored in dm_io->data before the reuse.
  *
@@ -1567,36 +1567,37 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
  * touch it after splitting. Meantime it won't be changed by anyone after
  * bio is submitted. So this reuse is safe.
  */
-static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
+static inline struct dm_io **dm_poll_list_head(struct bio *bio)
 {
-	return (struct hlist_head *)&bio->bi_private;
+	return (struct dm_io **)&bio->bi_private;
 }
 
 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
 {
-	struct hlist_head *head = dm_get_bio_hlist_head(bio);
+	struct dm_io **head = dm_poll_list_head(bio);
 
 	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
 		bio->bi_opf |= REQ_DM_POLL_LIST;
 		/*
 		 * Save .bi_private into dm_io, so that we can reuse
-		 * .bi_private as hlist head for storing dm_io list
+		 * .bi_private as dm_io list head for storing dm_io list
 		 */
 		io->data = bio->bi_private;
 
-		INIT_HLIST_HEAD(head);
-
 		/* tell block layer to poll for completion */
 		bio->bi_cookie = ~BLK_QC_T_NONE;
+
+		io->next = NULL;
 	} else {
 		/*
 		 * bio recursed due to split, reuse original poll list,
 		 * and save bio->bi_private too.
 		 */
-		io->data = hlist_entry(head->first, struct dm_io, node)->data;
+		io->data = (*head)->data;
+		io->next = *head;
 	}
 
-	hlist_add_head(&io->node, head);
+	*head = io;
 }
 
 /*
@@ -1685,8 +1686,8 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 	 * Drop the extra reference count for non-POLLED bio, and hold one
 	 * reference for POLLED bio, which will be released in dm_poll_bio
 	 *
-	 * Add every dm_io instance into the hlist_head which is stored in
-	 * bio->bi_private, so that dm_poll_bio can poll them all.
+	 * Add every dm_io instance into the dm_io list head which is stored
+	 * in bio->bi_private, so that dm_poll_bio can poll them all.
 	 */
 	if (error || !ci.submit_as_polled) {
 		/*
@@ -1748,18 +1749,16 @@ static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
 		       unsigned int flags)
 {
-	struct hlist_head *head = dm_get_bio_hlist_head(bio);
-	struct hlist_head tmp = HLIST_HEAD_INIT;
-	struct hlist_node *next;
-	struct dm_io *io;
+	struct dm_io **head = dm_poll_list_head(bio);
+	struct dm_io *list = *head;
+	struct dm_io *tmp = NULL;
+	struct dm_io *curr, *next;
 
 	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
 	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
 		return 0;
 
-	WARN_ON_ONCE(hlist_empty(head));
-
-	hlist_move_list(head, &tmp);
+	WARN_ON_ONCE(!list);
 
 	/*
 	 * Restore .bi_private before possibly completing dm_io.
@@ -1770,24 +1769,27 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
 	 * clearing REQ_DM_POLL_LIST here.
 	 */
 	bio->bi_opf &= ~REQ_DM_POLL_LIST;
-	bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+	bio->bi_private = list->data;
 
-	hlist_for_each_entry_safe(io, next, &tmp, node) {
-		if (dm_poll_dm_io(io, iob, flags)) {
-			hlist_del_init(&io->node);
+	for (curr = list, next = curr->next; curr; curr = next, next =
+			curr ? curr->next : NULL) {
+		if (dm_poll_dm_io(curr, iob, flags)) {
 			/*
 			 * clone_endio() has already occurred, so no
 			 * error handling is needed here.
 			 */
-			__dm_io_dec_pending(io);
+			__dm_io_dec_pending(curr);
+		} else {
+			curr->next = tmp;
+			tmp = curr;
 		}
 	}
 
 	/* Not done? */
-	if (!hlist_empty(&tmp)) {
+	if (tmp) {
 		bio->bi_opf |= REQ_DM_POLL_LIST;
 		/* Reset bio->bi_private to dm_io list head */
-		hlist_move_list(&tmp, head);
+		*head = tmp;
 		return 0;
 	}
 	return 1;
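
Taken together, the dm_poll_bio rewrite above is a single-pass drain over that singly-linked list: completed entries are dropped, still-pending ones are pushed onto a temporary list that becomes the new head, and only when nothing is left does the function report completion. Below is a standalone sketch of that loop shape (plain C with made-up names, not the kernel function; the done flag stands in for dm_poll_dm_io() returning true):

/*
 * Standalone sketch of the drain loop: walk a singly-linked list once,
 * let completed entries go, and collect the still-pending ones onto a
 * new list that becomes the head for the next poll pass.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct pending_io {
	bool			done;	/* stand-in for dm_poll_dm_io() */
	struct pending_io	*next;
};

/* Returns 1 if the whole list completed, 0 if *headp still holds work. */
static int drain_pending(struct pending_io **headp)
{
	struct pending_io *curr, *next, *rest = NULL;

	for (curr = *headp; curr; curr = next) {
		next = curr->next;
		if (curr->done) {
			/* completed: the kernel drops its reference here */
		} else {
			/* pending: push back onto the leftover list */
			curr->next = rest;
			rest = curr;
		}
	}
	*headp = rest;
	return rest ? 0 : 1;
}

int main(void)
{
	struct pending_io a = { .done = true }, b = { .done = false };
	struct pending_io *head = &b;

	b.next = &a;
	a.next = NULL;

	/* first poll: a completes, b remains as the new head */
	assert(drain_pending(&head) == 0 && head == &b && b.next == NULL);

	b.done = true;				/* second poll: b finishes */
	assert(drain_pending(&head) == 1 && head == NULL);
	return 0;
}

Note that, like the kernel loop, the sketch reads curr->next before relinking curr, since relinking rewrites the pointer it would otherwise follow.
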