Commit 8f4ae0f6 authored by Linus Torvalds

Merge tag 'block-5.13-2021-05-14' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Fix for shared tag set exit (Bart)

 - Correct ioctl range for zoned ioctls (Damien)

 - Remove a dead/unused function (Lin)

 - Fix perf regression for shared tags (Ming)

 - Fix out-of-bounds issue with kyber and preemption (Omar)

 - BFQ merge fix (Paolo)

 - Two error handling fixes for nbd (Sun)

 - Fix weight update in blk-iocost (Tejun)

 - NVMe pull request (Christoph):
      - correct the check for using the inline bio in nvmet (Chaitanya
        Kulkarni)
      - demote unsupported command warnings (Chaitanya Kulkarni)
      - fix corruption due to double initializing ANA state (me, Hou Pu)
      - reset ns->file when open fails (Daniel Wagner)
      - fix a NULL deref when SEND is completed with error in nvmet-rdma
        (Michal Kalderon)

 - Fix kernel-doc warning (Bart)

* tag 'block-5.13-2021-05-14' of git://git.kernel.dk/linux-block:
  block/partitions/efi.c: Fix the efi_partition() kernel-doc header
  blk-mq: Swap two calls in blk_mq_exit_queue()
  blk-mq: plug request for shared sbitmap
  nvmet: use new ana_log_size instead of the old one
  nvmet: reset ns->file when open fails
  nbd: share nbd_put and return by goto put_nbd
  nbd: Fix NULL pointer in flush_workqueue
  blkdev.h: remove unused code blk_account_rq
  block, bfq: avoid circular stable merges
  blk-iocost: fix weight updates of inner active iocgs
  nvmet: demote fabrics cmd parse err msg to debug
  nvmet: use helper to remove the duplicate code
  nvmet: demote discovery cmd parse err msg to debug
  nvmet-rdma: Fix NULL deref when SEND is completed with error
  nvmet: fix inline bio check for passthru
  nvmet: fix inline bio check for bdev-ns
  nvme-multipath: fix double initialization of ANA state
  kyber: fix out of bounds access when preempted
  block: uapi: fix comment about block device ioctl
parents 56015910 4bc20823
block/bfq-iosched.c +30 −4
@@ -372,9 +372,38 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
 	return bic->bfqq[is_sync];
 }
 
+static void bfq_put_stable_ref(struct bfq_queue *bfqq);
+
 void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
 {
+	/*
+	 * If bfqq != NULL, then a non-stable queue merge between
+	 * bic->bfqq and bfqq is happening here. This causes troubles
+	 * in the following case: bic->bfqq has also been scheduled
+	 * for a possible stable merge with bic->stable_merge_bfqq,
+	 * and bic->stable_merge_bfqq == bfqq happens to
+	 * hold. Troubles occur because bfqq may then undergo a split,
+	 * thereby becoming eligible for a stable merge. Yet, if
+	 * bic->stable_merge_bfqq points exactly to bfqq, then bfqq
+	 * would be stably merged with itself. To avoid this anomaly,
+	 * we cancel the stable merge if
+	 * bic->stable_merge_bfqq == bfqq.
+	 */
 	bic->bfqq[is_sync] = bfqq;
+
+	if (bfqq && bic->stable_merge_bfqq == bfqq) {
+		/*
+		 * Actually, these same instructions are executed also
+		 * in bfq_setup_cooperator, in case of abort or actual
+		 * execution of a stable merge. We could avoid
+		 * repeating these instructions there too, but if we
+		 * did so, we would nest even more complexity in this
+		 * function.
+		 */
+		bfq_put_stable_ref(bic->stable_merge_bfqq);
+
+		bic->stable_merge_bfqq = NULL;
+	}
 }
 
 struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
@@ -2263,10 +2292,9 @@ static void bfq_remove_request(struct request_queue *q,

 }
 
-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs)
 {
-	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
 	struct request *free = NULL;
 	/*
@@ -2631,8 +2659,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
 static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
 					     struct bfq_queue *bfqq);
 
-static void bfq_put_stable_ref(struct bfq_queue *bfqq);
-
 /*
  * Attempt to schedule a merge of bfqq with the currently in-service
  * queue or with a close queue among the scheduled queues.  Return
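
Illustrative note on the bfq change above: the guard added to bic_set_bfqq() cancels a pending stable merge whose target turns out to be the very queue being attached, so a queue can never be stably merged with itself. A minimal user-space sketch of that pattern follows; struct queue, io_cq, set_bfqq and put_stable_ref are simplified stand-ins, not bfq's real types.

#include <stdio.h>

struct queue { int refcount; };

struct io_cq {
	struct queue *bfqq;			/* currently attached queue */
	struct queue *stable_merge_bfqq;	/* scheduled merge target */
};

static void put_stable_ref(struct queue *q)
{
	q->refcount--;	/* drop the reference held for the pending merge */
}

static void set_bfqq(struct io_cq *bic, struct queue *bfqq)
{
	bic->bfqq = bfqq;
	/* Cancel the pending stable merge if it would be a self-merge. */
	if (bfqq && bic->stable_merge_bfqq == bfqq) {
		put_stable_ref(bic->stable_merge_bfqq);
		bic->stable_merge_bfqq = NULL;
	}
}

int main(void)
{
	struct queue q = { .refcount = 2 };
	struct io_cq bic = { .bfqq = NULL, .stable_merge_bfqq = &q };

	set_bfqq(&bic, &q);	/* would self-merge, so it is cancelled */
	printf("merge target: %p, refcount: %d\n",
	       (void *)bic.stable_merge_bfqq, q.refcount);
	return 0;
}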
block/blk-iocost.c +12 −2
@@ -1069,7 +1069,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,

 	lockdep_assert_held(&ioc->lock);
 
-	inuse = clamp_t(u32, inuse, 1, active);
+	/*
+	 * For an active leaf node, its inuse shouldn't be zero or exceed
+	 * @active. An active internal node's inuse is solely determined by the
+	 * inuse to active ratio of its children regardless of @inuse.
+	 */
+	if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
+		inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
+					   iocg->child_active_sum);
+	} else {
+		inuse = clamp_t(u32, inuse, 1, active);
+	}
 
 	iocg->last_inuse = iocg->inuse;
 	if (save)
@@ -1086,7 +1096,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
 		/* update the level sums */
 		parent->child_active_sum += (s32)(active - child->active);
 		parent->child_inuse_sum += (s32)(inuse - child->inuse);
-		/* apply the udpates */
+		/* apply the updates */
 		child->active = active;
 		child->inuse = inuse;
 
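Illustrative note on the blk-iocost change above: for an active inner node, inuse is now derived solely from the children's aggregate inuse/active ratio. A stand-alone sketch of that arithmetic, with div_round_up as a user-space stand-in for the kernel's DIV64_U64_ROUND_UP macro and made-up example weights:

#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* Hypothetical inner iocg: active weight 100; children below. */
	uint64_t active = 100;
	uint64_t child_inuse_sum = 150;
	uint64_t child_active_sum = 200;

	/*
	 * Inner-node inuse mirrors the children's ratio:
	 * 100 * 150 / 200 = 75, i.e. a quarter of the share is donated.
	 */
	uint64_t inuse = div_round_up(active * child_inuse_sum,
				      child_active_sum);
	printf("inner inuse = %llu\n", (unsigned long long)inuse);
	return 0;
}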
block/blk-mq-sched.c +5 −3
@@ -358,14 +358,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs)
 {
 	struct elevator_queue *e = q->elevator;
-	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+	struct blk_mq_ctx *ctx;
+	struct blk_mq_hw_ctx *hctx;
 	bool ret = false;
 	enum hctx_type type;
 
 	if (e && e->type->ops.bio_merge)
-		return e->type->ops.bio_merge(hctx, bio, nr_segs);
+		return e->type->ops.bio_merge(q, bio, nr_segs);
 
+	ctx = blk_mq_get_ctx(q);
+	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 	type = hctx->type;
 	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
 	    list_empty_careful(&ctx->rq_lists[type]))
block/blk-mq.c +7 −4
@@ -2232,8 +2232,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 		/* Bypass scheduler for flush requests */
 		blk_insert_flush(rq);
 		blk_mq_run_hw_queue(data.hctx, true);
-	} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
-				!blk_queue_nonrot(q))) {
+	} else if (plug && (q->nr_hw_queues == 1 ||
+		   blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
+		   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
 		/*
 		 * Use plugging if we have a ->commit_rqs() hook as well, as
 		 * we know the driver uses bd->last in a smart fashion.
@@ -3287,8 +3288,10 @@ void blk_mq_exit_queue(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	blk_mq_del_queue_tag_set(q);
+	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
+	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
+	blk_mq_del_queue_tag_set(q);
 }
 
 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
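
Illustrative note on the blk_mq_exit_queue() swap above: blk_mq_exit_hw_queues() still consults BLK_MQ_F_TAG_QUEUE_SHARED, and blk_mq_del_queue_tag_set() may clear that flag, so the exit must run first. A toy model of the ordering dependency; exit_hw_queues and del_queue_tag_set are illustrative stand-ins, not kernel APIs:

#include <stdio.h>

#define F_TAG_QUEUE_SHARED (1u << 0)

static unsigned int hctx_flags = F_TAG_QUEUE_SHARED;

/* Models blk_mq_exit_hw_queues(): teardown still reads the flag. */
static void exit_hw_queues(void)
{
	printf("exit hw queues, shared=%u\n",
	       hctx_flags & F_TAG_QUEUE_SHARED);
}

/* Models blk_mq_del_queue_tag_set(): may clear the flag. */
static void del_queue_tag_set(void)
{
	hctx_flags &= ~F_TAG_QUEUE_SHARED;
}

int main(void)
{
	/* Fixed order: consume the flag before the step that clears it. */
	exit_hw_queues();
	del_queue_tag_set();
	return 0;
}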
block/kyber-iosched.c +3 −2
@@ -561,11 +561,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 	}
 }
 
-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs)
 {
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
 	struct kyber_hctx_data *khd = hctx->sched_data;
-	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
 	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
 	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
 	struct list_head *rq_list = &kcq->rq_list[sched_domain];
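
Illustrative note on the kyber change above: previously __blk_mq_sched_bio_merge() and kyber_bio_merge() each performed their own per-CPU lookup, and preemption between the two could pair a stale hctx with a fresh ctx, indexing khd->kcqs[] out of bounds. The fix derives ctx and hctx from a single lookup inside kyber_bio_merge(). A user-space sketch of the underlying hazard, using only sched_getcpu():

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int cpu1 = sched_getcpu();	/* caller's per-CPU lookup */
	/* ... the task may be preempted and migrated here ... */
	int cpu2 = sched_getcpu();	/* callee's second lookup */

	if (cpu1 != cpu2)
		printf("mismatch: %d vs %d (stale pairing)\n", cpu1, cpu2);
	else
		printf("consistent lookup: cpu %d\n", cpu1);
	return 0;
}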