Commit b90cfaed authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: cleanup and rename __blk_mq_alloc_request

The newly added loop for the cached requests in __blk_mq_alloc_request
is a little too convoluted for my taste, so unwind it a bit.  Also
rename the function to __blk_mq_alloc_requests now that it can allocate
more than a single request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211012104045.658051-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 47c122e3
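
Note: the reworked control flow is easy to model outside the kernel. The standalone C toy below (hypothetical names — toy_alloc_requests, struct alloc_data — plain userspace code, not the kernel implementation) mirrors the unwound loop in the diff that follows: the last request allocated in a batch goes straight back to the caller, earlier ones are pushed onto a caller-supplied singly linked cache through rq_next, and if allocation stops early the function falls back to popping that cache. The sleep-and-retry path, the BLK_MQ_REQ_NOWAIT flip after the first request, and the elevator/shared-tags early return are all elided here.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel's struct request and struct blk_mq_alloc_data. */
struct request {
	int tag;
	struct request *rq_next;
};

struct alloc_data {
	unsigned int nr_tags;		/* how many requests this call may allocate */
	struct request **cached_rq;	/* caller-owned cache head; batching callers must set it */
};

/*
 * Mirrors the unwound shape of __blk_mq_alloc_requests(): allocate up to
 * nr_tags requests, return the last one directly, push the extras onto the
 * cached list, and pop the cache if allocation stops early.
 */
static struct request *toy_alloc_requests(struct alloc_data *data)
{
	static int next_tag;
	struct request *rq;

	do {
		rq = malloc(sizeof(*rq));	/* stands in for tag allocation + rq init */
		if (!rq)
			break;
		rq->tag = next_tag++;
		if (!--data->nr_tags)
			return rq;

		/* link into the cached list */
		rq->rq_next = *data->cached_rq;
		*data->cached_rq = rq;
	} while (1);

	/* allocation failed mid-batch: fall back to anything already cached */
	if (!data->cached_rq || !*data->cached_rq)
		return NULL;

	rq = *data->cached_rq;
	*data->cached_rq = rq->rq_next;
	return rq;
}

int main(void)
{
	struct request *cache = NULL;
	struct alloc_data single = { .nr_tags = 1, .cached_rq = NULL };
	struct alloc_data batch = { .nr_tags = 4, .cached_rq = &cache };
	struct request *rq;

	/* like blk_mq_alloc_request(): one request, the cache is never touched */
	rq = toy_alloc_requests(&single);
	printf("single: got tag %d\n", rq->tag);
	free(rq);

	/* like the plugged path in blk_mq_submit_bio(): a batch of four */
	rq = toy_alloc_requests(&batch);
	printf("batch: got tag %d\n", rq->tag);
	free(rq);
	while (cache) {			/* later allocations pop the cache */
		rq = cache;
		cache = rq->rq_next;
		printf("batch: popped cached tag %d\n", rq->tag);
		free(rq);
	}
	return 0;
}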
1 changed file with 28 additions and 28 deletions

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -354,7 +354,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	return rq;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
+static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 {
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
@@ -395,38 +395,38 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 	 */
 	do {
 		tag = blk_mq_get_tag(data);
-		if (tag != BLK_MQ_NO_TAG) {
-			rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
-			if (!--data->nr_tags)
-				return rq;
-			if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-				return rq;
-			rq->rq_next = *data->cached_rq;
-			*data->cached_rq = rq;
-			data->flags |= BLK_MQ_REQ_NOWAIT;
-			continue;
-		}
 		if (tag == BLK_MQ_NO_TAG) {
 			if (data->flags & BLK_MQ_REQ_NOWAIT)
 				break;
 
 			/*
-		 * Give up the CPU and sleep for a random short time to ensure
-		 * that thread using a realtime scheduling class are migrated
-		 * off the CPU, and thus off the hctx that is going away.
+			 * Give up the CPU and sleep for a random short time to
+			 * ensure that thread using a realtime scheduling class
+			 * are migrated off the CPU, and thus off the hctx that
+			 * is going away.
 			 */
 			msleep(3);
 			goto retry;
 		}
 
+		rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
+		if (!--data->nr_tags || e ||
+		    (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+			return rq;
+
+		/* link into the cached list */
+		rq->rq_next = *data->cached_rq;
+		*data->cached_rq = rq;
+		data->flags |= BLK_MQ_REQ_NOWAIT;
 	} while (1);
 
-	if (data->cached_rq) {
-		rq = *data->cached_rq;
-		*data->cached_rq = rq->rq_next;
-		return rq;
-	}
+	if (!data->cached_rq)
+		return NULL;
 
-	return NULL;
-}
+	rq = *data->cached_rq;
+	*data->cached_rq = rq->rq_next;
+	return rq;
+}
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		blk_mq_req_flags_t flags)
 {
@@ -443,7 +443,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = __blk_mq_alloc_request(&data);
+	rq = __blk_mq_alloc_requests(&data);
 	if (!rq)
 		goto out_queue_exit;
 	rq->__data_len = 0;
@@ -2258,7 +2258,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 			plug->nr_ios = 1;
 			data.cached_rq = &plug->cached_rq;
 		}
-		rq = __blk_mq_alloc_request(&data);
+		rq = __blk_mq_alloc_requests(&data);
 		if (unlikely(!rq)) {
 			rq_qos_cleanup(q, bio);
 			if (bio->bi_opf & REQ_NOWAIT)