Commit 70200574 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe
Browse files

block: remove QUEUE_FLAG_DISCARD



Just use a non-zero max_discard_sectors as an indicator for discard
support, similar to what is done for write zeroes.

The only place that needs special attention is the RAID5 driver,
which must clear discard support for security reasons by default,
even if the default stacking rules would allow for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com> [drbd]
Acked-by: Jan Höppner <hoeppner@linux.ibm.com> [s390]
Acked-by: Coly Li <colyli@suse.de> [bcache]
Acked-by: David Sterba <dsterba@suse.com> [btrfs]
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-25-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent cf0fbf89
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -483,7 +483,6 @@ static void ubd_handler(void)
			if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
				blk_queue_max_discard_sectors(io_req->req->q, 0);
				blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
			}
			blk_mq_end_request(io_req->req, io_req->error);
			kfree(io_req);
@@ -803,7 +802,6 @@ static int ubd_open_dev(struct ubd *ubd_dev)
		ubd_dev->queue->limits.discard_alignment = SECTOR_SIZE;
		blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
		blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, ubd_dev->queue);
	}
	blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue);
	return 0;
+1 −1
Original line number Diff line number Diff line
@@ -820,7 +820,7 @@ void submit_bio_noacct(struct bio *bio)

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
+1 −1
Original line number Diff line number Diff line
@@ -53,7 +53,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
		if (!bdev_max_discard_sectors(bdev))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}
+0 −1
Original line number Diff line number Diff line
@@ -113,7 +113,6 @@ static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
+1 −2
Original line number Diff line number Diff line
@@ -87,14 +87,13 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
{
	uint64_t range[2];
	uint64_t start, len;
	struct request_queue *q = bdev_get_queue(bdev);
	struct inode *inode = bdev->bd_inode;
	int err;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (!blk_queue_discard(q))
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
Loading