Commit de671d61 authored by Jens Axboe

block: change request end_io handler to pass back a return value



In preparation for allowing the end_io handler to pass ownership back
to the block layer, rather than retain ownership of the request.

Everything is just converted to returning RQ_END_IO_NONE, and there
should be no functional changes with this patch.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4b6a5d9c
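The hunks below only show the converted handlers and call sites; the new
return type itself is introduced in the block layer headers, which are not
shown here. As a rough sketch of the contract this patch establishes
(assuming the definitions sit next to the existing rq_end_io_fn typedef in
include/linux/blk-mq.h):

enum rq_end_io_ret {
	RQ_END_IO_NONE,		/* handler keeps ownership of the request */
	RQ_END_IO_FREE,		/* ask the block layer to free the request */
};

/* end_io callbacks now return an enum rq_end_io_ret instead of void */
typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);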
+7 −3
@@ -217,7 +217,8 @@ static void blk_flush_complete_seq(struct request *rq,
 	blk_kick_flush(q, fq, cmd_flags);
 }
 
-static void flush_end_io(struct request *flush_rq, blk_status_t error)
+static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
+				       blk_status_t error)
 {
 	struct request_queue *q = flush_rq->q;
 	struct list_head *running;
@@ -231,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	if (!req_ref_put_and_test(flush_rq)) {
 		fq->rq_status = error;
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
-		return;
+		return RQ_END_IO_NONE;
 	}
 
 	blk_account_io_flush(flush_rq);
@@ -268,6 +269,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	}
 
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+	return RQ_END_IO_NONE;
 }
 
 bool is_flush_rq(struct request *rq)
@@ -353,7 +355,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	blk_flush_queue_rq(flush_rq, false);
 }
 
-static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
+static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
+					       blk_status_t error)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@@ -375,6 +378,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
 	blk_mq_sched_restart(hctx);
+	return RQ_END_IO_NONE;
 }
 
 /**
+9 −5
@@ -1001,7 +1001,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
-		rq->end_io(rq, error);
+		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+			blk_mq_free_request(rq);
 	} else {
 		blk_mq_free_request(rq);
 	}
@@ -1295,12 +1296,13 @@ struct blk_rq_wait {
 	blk_status_t ret;
 };
 
-static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
 {
 	struct blk_rq_wait *wait = rq->end_io_data;
 
 	wait->ret = ret;
 	complete(&wait->done);
+	return RQ_END_IO_NONE;
 }
 
 bool blk_rq_is_poll(struct request *rq)
@@ -1534,11 +1536,13 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-	if (is_flush_rq(rq))
-		rq->end_io(rq, 0);
-	else if (req_ref_put_and_test(rq))
+	if (is_flush_rq(rq)) {
+		if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+			blk_mq_free_request(rq);
+	} else if (req_ref_put_and_test(rq)) {
 		__blk_mq_free_request(rq);
+	}
 }
 
 static bool blk_mq_check_expired(struct request *rq, void *priv)
 {
+3 −1
@@ -292,11 +292,13 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
 	dm_complete_request(rq, error);
 }
 
-static void end_clone_request(struct request *clone, blk_status_t error)
+static enum rq_end_io_ret end_clone_request(struct request *clone,
+					    blk_status_t error)
 {
 	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	dm_complete_request(tio->orig, error);
+	return RQ_END_IO_NONE;
 }
 
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
+4 −2
@@ -1172,7 +1172,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
 }
 
-static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+						 blk_status_t status)
 {
 	struct nvme_ctrl *ctrl = rq->end_io_data;
 	unsigned long flags;
@@ -1184,7 +1185,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 		dev_err(ctrl->device,
 			"failed nvme_keep_alive_end_io error=%d\n",
 				status);
-		return;
+		return RQ_END_IO_NONE;
 	}
 
 	ctrl->comp_seen = false;
	ctrl->comp_seen = false;
@@ -1195,6 +1196,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		nvme_queue_keep_alive_work(ctrl);
	return RQ_END_IO_NONE;
}

static void nvme_keep_alive_work(struct work_struct *work)
+4 −1
@@ -392,7 +392,8 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
 	io_uring_cmd_done(ioucmd, status, result);
 }
 
-static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+						blk_status_t err)
 {
 	struct io_uring_cmd *ioucmd = req->end_io_data;
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
@@ -411,6 +412,8 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
 		nvme_uring_task_cb(ioucmd);
 	else
 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+	return RQ_END_IO_NONE;
 }
 
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
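Every handler above still returns RQ_END_IO_NONE, so behaviour is unchanged
by this patch. As an illustration of what the new return value enables later
in the series, a hypothetical fire-and-forget handler (my_async_end_io is a
placeholder name, not part of this commit) could hand ownership back instead
of freeing the request itself:

static enum rq_end_io_ret my_async_end_io(struct request *rq,
					  blk_status_t error)
{
	if (error)
		pr_err("async request failed: %d\n", error);

	/*
	 * Returning RQ_END_IO_FREE makes __blk_mq_end_request() (and
	 * blk_mq_put_rq_ref()) call blk_mq_free_request() on our behalf,
	 * rather than the handler calling it directly.
	 */
	return RQ_END_IO_FREE;
}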