block/blk-mq-tag.c  +2 −2

@@ -90,9 +90,9 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 			    struct sbitmap_queue *bt)
 {
-	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
-	    !hctx_may_queue(data->hctx, bt))
+	if (!data->q->elevator &&
+	    !hctx_may_queue(data->hctx, bt))
 		return BLK_MQ_NO_TAG;
 	if (data->shallow_depth)
 		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
 	else
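Taken together, these hunks drop the per-allocation BLK_MQ_REQ_INTERNAL flag and derive the same information from whether the queue has an elevator attached. For reference, a sketch of __blk_mq_get_tag() as it reads after this hunk; the final return statement and closing brace fall outside the hunk shown above and are assumed from the surrounding file:

	/* Sketch of the post-patch function; the tail is assumed, not
	 * shown in the hunk above. */
	static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
				    struct sbitmap_queue *bt)
	{
		/* Only driver tags are subject to the per-hctx queue
		 * depth limit; scheduler tags (elevator attached) are
		 * not. */
		if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
			return BLK_MQ_NO_TAG;
		if (data->shallow_depth)
			return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
		else
			return __sbitmap_queue_get(bt);
	}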
block/blk-mq.c  +3 −7

@@ -279,7 +279,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	struct request *rq = tags->static_rqs[tag];
 	req_flags_t rq_flags = 0;
 
-	if (data->flags & BLK_MQ_REQ_INTERNAL) {
+	if (data->q->elevator) {
 		rq->tag = BLK_MQ_NO_TAG;
 		rq->internal_tag = tag;
 	} else {
@@ -364,8 +364,6 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
 	if (e) {
-		data->flags |= BLK_MQ_REQ_INTERNAL;
-
 		/*
 		 * Flush requests are special and go directly to the
 		 * dispatch list. Don't include reserved tags in the
@@ -380,7 +378,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 retry:
 	data->ctx = blk_mq_get_ctx(q);
 	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
-	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
+	if (!e)
 		blk_mq_tag_busy(data->hctx);
 
 	/*
@@ -476,9 +474,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
 	data.ctx = __blk_mq_get_ctx(q, cpu);
 
-	if (q->elevator)
-		data.flags |= BLK_MQ_REQ_INTERNAL;
-	else
+	if (!q->elevator)
 		blk_mq_tag_busy(data.hctx);
 
 	ret = -EWOULDBLOCK;
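The blk-mq.c hunks follow the same pattern: every site that used to set or test BLK_MQ_REQ_INTERNAL now tests q->elevator (or the cached e) directly, and blk_mq_tag_busy() accounting is only done when no elevator is attached. A minimal sketch of the tag-recording logic from the first hunk; the helper name rq_init_tag is hypothetical, and the else branch is an assumption based on the surrounding code, which the hunk truncates:

	/* Simplified sketch, not a verbatim copy of blk_mq_rq_ctx_init():
	 * the q->elevator test decides whether the freshly allocated tag
	 * is recorded as a scheduler tag or as a driver tag. */
	static void rq_init_tag(struct blk_mq_alloc_data *data,
				struct request *rq, unsigned int tag)
	{
		if (data->q->elevator) {
			/* tag came from hctx->sched_tags */
			rq->tag = BLK_MQ_NO_TAG;
			rq->internal_tag = tag;
		} else {
			/* tag came from hctx->tags (driver tags) */
			rq->tag = tag;
			rq->internal_tag = BLK_MQ_NO_TAG;
		}
	}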
block/blk-mq.h  +1 −1

@@ -159,7 +159,7 @@ struct blk_mq_alloc_data {
 static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
 {
-	if (data->flags & BLK_MQ_REQ_INTERNAL)
+	if (data->q->elevator)
 		return data->hctx->sched_tags;
 
 	return data->hctx->tags;
 }
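With this change, the same q->elevator test picks the tag set that __blk_mq_get_tag() allocates from, so the two sides stay consistent by construction. A simplified sketch of how they combine, modelled loosely on blk_mq_get_tag(); the helper name get_one_tag is hypothetical, reserved-tag handling and waiting are omitted, and the embedded bitmap_tags field is assumed from the tree this patch targets:

	/* Simplified pairing of the two helpers; the real tag allocation
	 * path also handles reserved tags and sleeping for a free tag. */
	static int get_one_tag(struct blk_mq_alloc_data *data)
	{
		/* sched_tags when an elevator is attached, driver tags
		 * otherwise */
		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);

		return __blk_mq_get_tag(data, &tags->bitmap_tags);
	}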
include/linux/blk-mq.h  +0 −2

@@ -447,8 +447,6 @@ enum {
 	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
 	/* allocate from reserved pool */
 	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
-	/* allocate internal/sched tag */
-	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
 	/* set RQF_PREEMPT */
 	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
 };
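After the removal, the enum reads as below (showing only the lines visible in the hunk): bit 2 is simply left unused rather than renumbering BLK_MQ_REQ_PREEMPT, which keeps the values of the remaining flags unchanged.

	enum {
		BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
		/* allocate from reserved pool */
		BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
		/* bit 2 now unused: was BLK_MQ_REQ_INTERNAL */
		/* set RQF_PREEMPT */
		BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
	};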