Commit d2b768c3 authored by Linus Torvalds

Merge tag 'io_uring-6.0-2022-09-09' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - Removed function that became unused after last week's merge (Jiapeng)

 - Two small fixes for kbuf recycling (Pavel)

 - Copy the address for a zero-copy send when POLL_FIRST is set (Pavel)

 - Fix for short IO handling in the normal read/write path (Pavel)

* tag 'io_uring-6.0-2022-09-09' of git://git.kernel.dk/linux-block:
  io_uring/rw: fix short rw error handling
  io_uring/net: copy addr for zc on POLL_FIRST
  io_uring: recycle kbuf recycle on tw requeue
  io_uring/kbuf: fix not advancing READV kbuf ring
  io_uring/notif: Remove the unused function io_notif_complete()
parents 0099baa8 4d9cb92c
+1 −0
@@ -1728,6 +1728,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
 
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
+		io_kbuf_recycle(req, 0);
 		io_req_task_queue(req);
 		break;
 	case IO_APOLL_ABORTED:
+6 −2
@@ -91,9 +91,13 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	 * buffer data. However if that buffer is recycled the original request
 	 * data stored in addr is lost. Therefore forbid recycling for now.
 	 */
-	if (req->opcode == IORING_OP_READV)
+	if (req->opcode == IORING_OP_READV) {
+		if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
+			req->buf_list->head++;
+			req->buf_list = NULL;
+		}
 		return;
-
+	}
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		io_kbuf_recycle_legacy(req, issue_flags);
 	if (req->flags & REQ_F_BUFFER_RING)
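For readers less familiar with provided-buffer rings, the standalone mock below (not kernel code; every name in it is invented for illustration) shows the head/tail convention the hunk above relies on: the kernel consumes ring entries by advancing a head index, which is what the new req->buf_list->head++ does for READV, while userspace hands buffers to the kernel by advancing the tail. The patch pairs the bump with clearing req->buf_list so the same entry cannot be committed twice.

/*
 * Illustrative mock of a provided-buffer ring, not the kernel structures.
 * 'head' is the consumer index the kernel bumps when it keeps a buffer,
 * 'tail' is the producer index userspace bumps when it adds one.
 */
#include <stdio.h>

struct demo_buf {
	void		*addr;
	unsigned int	len;
};

struct demo_buf_ring {
	struct demo_buf	*bufs;
	unsigned int	mask;	/* entries - 1, entries being a power of two */
	unsigned int	head;	/* consumer index (kernel side) */
	unsigned int	tail;	/* producer index (userspace side) */
};

/* Commit one buffer: the spirit of req->buf_list->head++ above. */
static struct demo_buf *demo_ring_consume(struct demo_buf_ring *br)
{
	if (br->head == br->tail)
		return NULL;		/* ring empty, no buffer provided */
	return &br->bufs[br->head++ & br->mask];
}

int main(void)
{
	static char mem[2][64];
	struct demo_buf bufs[2] = {
		{ .addr = mem[0], .len = 64 },
		{ .addr = mem[1], .len = 64 },
	};
	struct demo_buf_ring br = { .bufs = bufs, .mask = 1, .tail = 2 };

	while (demo_ring_consume(&br))
		printf("consumed one buffer, head is now %u\n", br.head);
	return 0;
}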
+4 −3
@@ -1003,9 +1003,6 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 	unsigned msg_flags, cflags;
 	int ret, min_ret = 0;
 
-	if (!(req->flags & REQ_F_POLLED) &&
-	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
-		return -EAGAIN;
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
@@ -1030,6 +1027,10 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 		msg.msg_namelen = zc->addr_len;
 	}
 
+	if (!(req->flags & REQ_F_POLLED) &&
+	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
+		return io_setup_async_addr(req, addr, issue_flags);
+
 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
 		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
 					(u64)(uintptr_t)zc->buf, zc->len);
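As a usage illustration, here is a hedged userspace sketch of the case these hunks address, assuming liburing 2.3 or newer for the prep helpers; the queue_zc_send() wrapper itself is invented for the example. It queues a zero-copy send with an explicit destination address plus IORING_RECVSEND_POLL_FIRST, the combination for which the address is now stashed via io_setup_async_addr() before the request is parked on poll.

#include <liburing.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int queue_zc_send(struct io_uring *ring, int sockfd,
			 const void *buf, size_t len,
			 const struct sockaddr_in *dst)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* Last argument is zc_flags: POLL_FIRST asks the kernel to wait for
	 * the socket to become writable before attempting the send. */
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0,
			      IORING_RECVSEND_POLL_FIRST);
	/* Attach a destination address, as sendto(2) would take. */
	io_uring_prep_send_set_addr(sqe, (const struct sockaddr *)dst,
				    sizeof(*dst));
	return io_uring_submit(ring);
}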
+0 −8
@@ -21,14 +21,6 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
 	io_req_task_complete(notif, locked);
 }
 
-static inline void io_notif_complete(struct io_kiocb *notif)
-	__must_hold(&notif->ctx->uring_lock)
-{
-	bool locked = true;
-
-	__io_notif_complete_tw(notif, &locked);
-}
-
 static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
 					  struct ubuf_info *uarg,
 					  bool success)
+18 −12
@@ -206,6 +206,20 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 	return false;
 }
 
+static inline unsigned io_fixup_rw_res(struct io_kiocb *req, unsigned res)
+{
+	struct io_async_rw *io = req->async_data;
+
+	/* add previously done IO, if any */
+	if (req_has_async_data(req) && io->bytes_done > 0) {
+		if (res < 0)
+			res = io->bytes_done;
+		else
+			res += io->bytes_done;
+	}
+	return res;
+}
+
 static void io_complete_rw(struct kiocb *kiocb, long res)
 {
 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
@@ -213,7 +227,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
 
 	if (__io_complete_rw_common(req, res))
 		return;
-	io_req_set_res(req, res, 0);
+	io_req_set_res(req, io_fixup_rw_res(req, res), 0);
 	req->io_task_work.func = io_req_task_complete;
 	io_req_task_work_add(req);
 }
@@ -240,22 +254,14 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		       unsigned int issue_flags)
 {
-	struct io_async_rw *io = req->async_data;
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-	/* add previously done IO, if any */
-	if (req_has_async_data(req) && io->bytes_done > 0) {
-		if (ret < 0)
-			ret = io->bytes_done;
-		else
-			ret += io->bytes_done;
-	}
+	unsigned final_ret = io_fixup_rw_res(req, ret);
 
 	if (req->flags & REQ_F_CUR_POS)
 		req->file->f_pos = rw->kiocb.ki_pos;
 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
 		if (!__io_complete_rw_common(req, ret)) {
-			io_req_set_res(req, req->cqe.res,
+			io_req_set_res(req, final_ret,
 				       io_put_kbuf(req, issue_flags));
 			return IOU_OK;
 		}
@@ -268,7 +274,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		if (io_resubmit_prep(req))
 			io_req_task_queue_reissue(req);
 		else
-			io_req_task_queue_fail(req, ret);
+			io_req_task_queue_fail(req, final_ret);
 	}
 	return IOU_ISSUE_SKIP_COMPLETE;
 }
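To make the accounting concrete, here is a small standalone mock (not kernel code) of the bookkeeping io_fixup_rw_res() centralizes: when an earlier pass already transferred some bytes and the retry then fails, the completion should carry the bytes that did transfer rather than the error, and a further partial transfer is added to the running total.

#include <assert.h>

/* Same shape as the helper added above, minus the io_kiocb plumbing. */
static long fixup_rw_res(long res, long bytes_done)
{
	if (bytes_done > 0) {
		if (res < 0)
			res = bytes_done;	/* report the short transfer */
		else
			res += bytes_done;	/* extend the running total */
	}
	return res;
}

int main(void)
{
	/* 4096 bytes were read before the retry failed with -EAGAIN (-11);
	 * the request should complete with 4096, not the error. */
	assert(fixup_rw_res(-11, 4096) == 4096);
	/* A further 512-byte transfer on top of 4096 completes with 4608. */
	assert(fixup_rw_res(512, 4096) == 4608);
	return 0;
}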