fs/io_uring.c (+7 −8)

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5240,7 +5240,7 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock,
 }
 
 static int io_req_defer_prep(struct io_kiocb *req,
-			     const struct io_uring_sqe *sqe, bool for_async)
+			     const struct io_uring_sqe *sqe)
 {
 	ssize_t ret = 0;
 
@@ -5254,9 +5254,6 @@ static int io_req_defer_prep(struct io_kiocb *req,
 		return ret;
 	}
 
-	if (for_async || (req->flags & REQ_F_WORK_INITIALIZED))
-		io_req_work_grab_env(req);
-
 	switch (req->opcode) {
 	case IORING_OP_NOP:
 		break;
@@ -5369,9 +5366,10 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (!req->io) {
 		if (io_alloc_async_ctx(req))
 			return -EAGAIN;
-		ret = io_req_defer_prep(req, sqe, true);
+		ret = io_req_defer_prep(req, sqe);
 		if (ret < 0)
 			return ret;
+		io_req_work_grab_env(req);
 	}
 
 	spin_lock_irq(&ctx->completion_lock);
@@ -5983,9 +5981,10 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		ret = -EAGAIN;
 		if (io_alloc_async_ctx(req))
 			goto fail_req;
-		ret = io_req_defer_prep(req, sqe, true);
+		ret = io_req_defer_prep(req, sqe);
 		if (unlikely(ret < 0))
 			goto fail_req;
+		io_req_work_grab_env(req);
 	}
 
 	/*
@@ -6039,7 +6038,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		if (io_alloc_async_ctx(req))
 			return -EAGAIN;
 
-		ret = io_req_defer_prep(req, sqe, false);
+		ret = io_req_defer_prep(req, sqe);
 		if (ret) {
 			/* fail even hard links since we don't submit */
 			head->flags |= REQ_F_FAIL_LINK;
@@ -6066,7 +6065,7 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		if (io_alloc_async_ctx(req))
 			return -EAGAIN;
 
-		ret = io_req_defer_prep(req, sqe, false);
+		ret = io_req_defer_prep(req, sqe);
 		if (ret)
 			req->flags |= REQ_F_FAIL_LINK;
 		*link = req;
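As far as the hunks above show, the net effect of this patch is that io_req_defer_prep() no longer grabs the work environment itself: the two async-deferral callers (io_req_defer() and io_queue_sqe()) now call io_req_work_grab_env() explicitly, and only after prep has succeeded, while the link-building paths in io_submit_sqe() never grab it at this stage. The stand-alone C model below mirrors that post-patch control flow; all of its types and function bodies are simplified stand-ins for illustration, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; illustrative only. */
struct io_uring_sqe { int opcode; };
struct io_kiocb { int opcode; bool work_initialized; };

static int io_req_defer_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/* After the patch: pure preparation, no work-env side effects. */
	req->opcode = sqe->opcode;
	return 0;
}

static void io_req_work_grab_env(struct io_kiocb *req)
{
	/* Stub for grabbing the task context needed for async execution. */
	req->work_initialized = true;
}

/* Async-deferral path (mirrors io_req_defer()/io_queue_sqe() above):
 * grab the env explicitly, and only once prep has succeeded. */
static int defer_path(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret = io_req_defer_prep(req, sqe);
	if (ret < 0)
		return ret;
	io_req_work_grab_env(req);
	return 0;
}

/* Link-building path (mirrors io_submit_sqe() above): prep only,
 * no env grab at this point. */
static int link_path(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_req_defer_prep(req, sqe);
}

int main(void)
{
	struct io_uring_sqe sqe = { .opcode = 0 };
	struct io_kiocb a = { 0 }, b = { 0 };

	defer_path(&a, &sqe);
	link_path(&b, &sqe);
	printf("defer path grabbed env: %d, link path grabbed env: %d\n",
	       a.work_initialized, b.work_initialized);
	return 0;
}

Hoisting io_req_work_grab_env() out of io_req_defer_prep() also means a prep failure can no longer leave a grabbed work environment behind, which is presumably part of the motivation for the change.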