Commit 9408d8a3 authored by Keith Busch, committed by Jens Axboe

nvme: improved uring polling

Drivers can poll requests directly, so use that. We just need to ensure
the driver's request was allocated from a polled hctx, so a special
driver flag is added to struct io_uring_cmd.
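
In code, the handoff looks roughly like this (a condensed sketch drawn
from the diff below, not verbatim kernel code):

	/* submission side: the request came from a polled hctx */
	if (blk_rq_is_poll(req)) {
		ioucmd->flags |= IORING_URING_CMD_POLLED;
		WRITE_ONCE(ioucmd->cookie, req);
	}

	/* ->uring_cmd_iopoll(): poll the stashed request directly, no bio */
	req = READ_ONCE(ioucmd->cookie);
	if (req && blk_rq_is_poll(req))
		ret = blk_rq_poll(req, iob, poll_flags);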

This allows unshared and multipath namespaces to use the same polling
callback, and multipath is guaranteed to get the same queue the
command was submitted on. Previously, multipath polling might check a
different path and poll the wrong queue.

The other bonus is that we don't need a bio payload in order to poll,
allowing commands like 'flush' and 'write zeroes' to be submitted on the
same high-priority queue as read and write commands.
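
For illustration, here is a rough, untested userspace sketch of what
this enables: polling a passthrough flush, which carries no data
payload. This is not part of the patch; it uses liburing and the NVMe
passthrough uapi, and the device path and namespace id are assumed
placeholders. NVMe uring_cmd needs the big-SQE/big-CQE ring setup:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <liburing.h>
	#include <linux/nvme_ioctl.h>

	int poll_flush(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct nvme_uring_cmd *cmd;
		int fd, ret;

		fd = open("/dev/ng0n1", O_RDWR);	/* assumed device */
		if (fd < 0)
			return -1;
		ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL |
				IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
		if (ret < 0) {
			close(fd);
			return ret;
		}

		sqe = io_uring_get_sqe(&ring);
		memset(sqe, 0, 128);		/* SQE128: slots are 128 bytes */
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd = fd;
		sqe->cmd_op = NVME_URING_CMD_IO;

		cmd = (struct nvme_uring_cmd *)sqe->cmd;
		cmd->opcode = 0;		/* nvme_cmd_flush */
		cmd->nsid = 1;			/* assumed namespace id */

		io_uring_submit(&ring);
		/* on an IOPOLL ring, waiting spins the polled hctx */
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret)
			io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		close(fd);
		return ret;
	}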

Finally, request-based polling skips the unnecessary bio overhead.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230612190343.2087040-3-kbusch@meta.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f6c80cff
drivers/nvme/host/ioctl.c  +19 −51
@@ -505,7 +505,6 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 {
 	struct io_uring_cmd *ioucmd = req->end_io_data;
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-	void *cookie = READ_ONCE(ioucmd->cookie);
 
 	req->bio = pdu->bio;
 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
@@ -518,10 +517,12 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	 * For iopoll, complete it directly.
 	 * Otherwise, move the completion to task work.
 	 */
-	if (cookie != NULL && blk_rq_is_poll(req))
+	if (blk_rq_is_poll(req)) {
+		WRITE_ONCE(ioucmd->cookie, NULL);
 		nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
-	else
+	} else {
 		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
+	}
 
 	return RQ_END_IO_FREE;
 }
@@ -531,7 +532,6 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
 {
 	struct io_uring_cmd *ioucmd = req->end_io_data;
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-	void *cookie = READ_ONCE(ioucmd->cookie);
 
 	req->bio = pdu->bio;
 	pdu->req = req;
@@ -540,10 +540,12 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
 	 * For iopoll, complete it directly.
 	 * Otherwise, move the completion to task work.
 	 */
-	if (cookie != NULL && blk_rq_is_poll(req))
+	if (blk_rq_is_poll(req)) {
+		WRITE_ONCE(ioucmd->cookie, NULL);
 		nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
-	else
+	} else {
 		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_meta_cb);
+	}
 
 	return RQ_END_IO_NONE;
 }
@@ -599,7 +601,6 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	if (issue_flags & IO_URING_F_IOPOLL)
 		rq_flags |= REQ_POLLED;
 
-retry:
 	req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
@@ -613,17 +614,11 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			return ret;
 	}
 
-	if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
-		if (unlikely(!req->bio)) {
-			/* we can't poll this, so alloc regular req instead */
-			blk_mq_free_request(req);
-			rq_flags &= ~REQ_POLLED;
-			goto retry;
-		} else {
-			WRITE_ONCE(ioucmd->cookie, req->bio);
-			req->bio->bi_opf |= REQ_POLLED;
-		}
+	if (blk_rq_is_poll(req)) {
+		ioucmd->flags |= IORING_URING_CMD_POLLED;
+		WRITE_ONCE(ioucmd->cookie, req);
 	}
 
 	/* to free bio on completion, as req->bio will be null at that time */
 	pdu->bio = req->bio;
 	pdu->meta_len = d.metadata_len;
@@ -785,18 +780,16 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
 				 struct io_comp_batch *iob,
 				 unsigned int poll_flags)
 {
-	struct bio *bio;
+	struct request *req;
 	int ret = 0;
-	struct nvme_ns *ns;
-	struct request_queue *q;
 
+	if (!(ioucmd->flags & IORING_URING_CMD_POLLED))
+		return 0;
+
 	rcu_read_lock();
-	bio = READ_ONCE(ioucmd->cookie);
-	ns = container_of(file_inode(ioucmd->file)->i_cdev,
-			struct nvme_ns, cdev);
-	q = ns->queue;
-	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
-		ret = bio_poll(bio, iob, poll_flags);
+	req = READ_ONCE(ioucmd->cookie);
+	if (req && blk_rq_is_poll(req))
+		ret = blk_rq_poll(req, iob, poll_flags);
 	rcu_read_unlock();
 	return ret;
 }
@@ -890,31 +883,6 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
 }
-
-int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
-				      struct io_comp_batch *iob,
-				      unsigned int poll_flags)
-{
-	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
-	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
-	int srcu_idx = srcu_read_lock(&head->srcu);
-	struct nvme_ns *ns = nvme_find_path(head);
-	struct bio *bio;
-	int ret = 0;
-	struct request_queue *q;
-
-	if (ns) {
-		rcu_read_lock();
-		bio = READ_ONCE(ioucmd->cookie);
-		q = ns->queue;
-		if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
-				&& bio->bi_bdev)
-			ret = bio_poll(bio, iob, poll_flags);
-		rcu_read_unlock();
-	}
-	srcu_read_unlock(&head->srcu, srcu_idx);
-	return ret;
-}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
drivers/nvme/host/multipath.c  +1 −1
@@ -470,7 +470,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
 	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
 	.compat_ioctl	= compat_ptr_ioctl,
 	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
-	.uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
+	.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
 };
 
 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
drivers/nvme/host/nvme.h  +0 −2
@@ -854,8 +854,6 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 		unsigned long arg);
 int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
 		struct io_comp_batch *iob, unsigned int poll_flags);
-int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
-		struct io_comp_batch *iob, unsigned int poll_flags);
 int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 		unsigned int issue_flags);
 int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
include/uapi/linux/io_uring.h  +2 −0
@@ -244,8 +244,10 @@ enum io_uring_op {
  * sqe->uring_cmd_flags
  * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
  *				along with setting sqe->buf_index.
+ * IORING_URING_CMD_POLLED	driver use only
  */
 #define IORING_URING_CMD_FIXED	(1U << 0)
+#define IORING_URING_CMD_POLLED	(1U << 31)
 
 
 /*