Commit bf14e2b2 authored by Damien Le Moal, committed by Mike Snitzer

dm: Forbid requeue of writes to zones



A target's map method requesting the requeue of a bio with
DM_MAPIO_REQUEUE, or its end_io method completing the bio with
DM_ENDIO_REQUEUE, can cause unaligned write errors if the bio is a write
operation targeting a sequential zone: writes to a sequential zone must
be issued at the zone write pointer, so a requeued write that is
reissued later, out of order, is rejected by the device. If a zoned
target requests such a requeue, warn about it and kill the IO.

The function dm_is_zone_write() is introduced to detect write operations
to zoned targets.
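
As an illustration only (not part of this patch), a zone-aware target's
end_io method could respect this rule by never returning
DM_ENDIO_REQUEUE for write bios. The example_end_io() callback below is
hypothetical and only sketches the idea:

	static int example_end_io(struct dm_target *ti, struct bio *bio,
				  blk_status_t *error)
	{
		/* A transient error on a non-write bio may still be pushed back. */
		if (*error == BLK_STS_RESOURCE && !op_is_write(bio->bi_opf))
			return DM_ENDIO_REQUEUE;

		/* Writes, including writes to sequential zones, are never requeued. */
		return DM_ENDIO_DONE;
	}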

This change does not affect the target drivers that support zoned
devices and expose a zoned device, namely dm-crypt, dm-linear and
dm-flakey, as none of these targets ever requests a requeue.

Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 912e8875
drivers/md/dm-zone.c  +17 −0
@@ -104,6 +104,23 @@ int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(dm_report_zones);
 
+bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
+{
+	struct request_queue *q = md->queue;
+
+	if (!blk_queue_is_zoned(q))
+		return false;
+
+	switch (bio_op(bio)) {
+	case REQ_OP_WRITE_ZEROES:
+	case REQ_OP_WRITE_SAME:
+	case REQ_OP_WRITE:
+		return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
+	default:
+		return false;
+	}
+}
+
 void dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
 {
 	if (!blk_queue_is_zoned(q))
drivers/md/dm.c  +19 −6
@@ -841,22 +841,27 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
 	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
+		bio = io->orig_bio;
 		if (io->status == BLK_STS_DM_REQUEUE) {
 			/*
 			 * Target requested pushing back the I/O.
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
-			if (__noflush_suspending(md))
+			if (__noflush_suspending(md) &&
+			    !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
 				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
-				bio_list_add_head(&md->deferred, io->orig_bio);
-			else
-				/* noflush suspend was interrupted. */
+				bio_list_add_head(&md->deferred, bio);
+			} else {
+				/*
+				 * noflush suspend was interrupted or this is
+				 * a write to a zoned target.
+				 */
 				io->status = BLK_STS_IOERR;
+			}
 			spin_unlock_irqrestore(&md->deferred_lock, flags);
 		}
 
 		io_error = io->status;
-		bio = io->orig_bio;
 		end_io_acct(io);
 		free_io(md, io);
 
@@ -947,6 +952,14 @@ static void clone_endio(struct bio *bio)
 		int r = endio(tio->ti, bio, &error);
 		switch (r) {
 		case DM_ENDIO_REQUEUE:
-			error = BLK_STS_DM_REQUEUE;
+			/*
+			 * Requeuing writes to a sequential zone of a zoned
+			 * target will break the sequential write pattern:
+			 * fail such IO.
+			 */
+			if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
+				error = BLK_STS_IOERR;
+			else
+				error = BLK_STS_DM_REQUEUE;
 			fallthrough;
 		case DM_ENDIO_DONE:
drivers/md/dm-core.h  +5 −0
@@ -107,8 +107,13 @@ void dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
 #ifdef CONFIG_BLK_DEV_ZONED
 int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
 			unsigned int nr_zones, report_zones_cb cb, void *data);
+bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
 #else
 #define dm_blk_report_zones	NULL
+static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
+{
+	return false;
+}
 #endif

/*-----------------------------------------------------------------