Commit f851453b authored by Linus Torvalds
Browse files

Merge tag 'io_uring-6.2-2023-01-27' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:
 "Two small fixes for this release:

   - Sanitize how async prep is done for drain requests, so we ensure
     that it always gets done (Dylan)

   - A ring provided buffer recycling fix for multishot receive (me)"

* tag 'io_uring-6.2-2023-01-27' of git://git.kernel.dk/linux:
  io_uring: always prep_async for drain requests
  io_uring/net: cache provided buffer group value for multishot receives
parents 28cca23d ef5c600a
Loading
Loading
Loading
Loading
+8 −10
Original line number Diff line number Diff line
@@ -1765,17 +1765,12 @@ static __cold void io_drain_req(struct io_kiocb *req)
	}
	spin_unlock(&ctx->completion_lock);

	ret = io_req_prep_async(req);
	if (ret) {
fail:
		io_req_defer_failed(req, ret);
		return;
	}
	io_prep_async_link(req);
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de) {
		ret = -ENOMEM;
		goto fail;
		io_req_defer_failed(req, ret);
		return;
	}

	spin_lock(&ctx->completion_lock);
@@ -2048,13 +2043,16 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
		req->flags &= ~REQ_F_HARDLINK;
		req->flags |= REQ_F_LINK;
		io_req_defer_failed(req, req->cqe.res);
	} else if (unlikely(req->ctx->drain_active)) {
		io_drain_req(req);
	} else {
		int ret = io_req_prep_async(req);

		if (unlikely(ret))
		if (unlikely(ret)) {
			io_req_defer_failed(req, ret);
			return;
		}

		if (unlikely(req->ctx->drain_active))
			io_drain_req(req);
		else
			io_queue_iowq(req, NULL);
	}
+11 −0
Original line number Diff line number Diff line
@@ -62,6 +62,7 @@ struct io_sr_msg {
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb 		*notif;
@@ -580,6 +581,15 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
@@ -596,6 +606,7 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*