Commit e2736347 authored by Mike Snitzer
Browse files

dm: factor out dm_io_complete



Optimizes dm_io_dec_pending() slightly by avoiding local variables.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 69596f55
Loading
Loading
Loading
Loading
+77 −72
Original line number Diff line number Diff line
@@ -841,28 +841,14 @@ static int __noflush_suspending(struct mapped_device *md)
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
static void dm_io_complete(struct dm_io *io)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		bio = io->orig_bio;
	if (io->status == BLK_STS_DM_REQUEUE) {
		unsigned long flags;
		/*
		 * Target requested pushing back the I/O.
		 */
@@ -924,6 +910,25 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
		bio_endio(bio);
	}
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary (i.e. once
 * the last outstanding clone has completed).
 */
void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	/*
	 * Push-back supersedes any I/O errors: if the target requested
	 * requeue (BLK_STS_DM_REQUEUE) while the device is in noflush
	 * suspend, keep that status rather than overwriting it with this
	 * clone's error.  endio_lock serializes concurrent clone
	 * completions racing to record io->status.
	 */
	if (unlikely(error)) {
		unsigned long flags;
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE &&
		      __noflush_suspending(io->md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	/* The last clone to complete finishes the original bio. */
	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

void disable_discard(struct mapped_device *md)
@@ -1562,7 +1567,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,

	if (bio->bi_opf & REQ_PREFLUSH) {
		error = __send_empty_flush(&ci);
		/* dm_io_dec_pending submits any data associated with flush */
		/* dm_io_complete submits any data associated with flush */
		goto out;
	}

@@ -1575,7 +1580,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
	 * Remainder must be passed to submit_bio_noacct() so it gets handled
	 * *after* bios already submitted have been completely processed.
	 * We take a clone of the original to store in ci.io->orig_bio to be
	 * used by dm_end_io_acct() and for dm_io_dec_pending() to use for
	 * used by dm_end_io_acct() and for dm_io_complete() to use for
	 * completion handling.
	 */
	orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count,