Commit f44c7dbd authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'block-5.16-2021-11-13' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Set of fixes that should go into this merge window:

   - ioctl vs read data race fixes (Shin'ichiro)

   - blkcg use-after-free fix (Laibin)

   - Last piece of the puzzle for add_disk() error handling, enable
     __must_check for (Luis)

   - Request allocation fixes (Ming)

   - Misc fixes (me)"

* tag 'block-5.16-2021-11-13' of git://git.kernel.dk/linux-block:
  blk-mq: fix filesystem I/O request allocation
  blkcg: Remove extra blkcg_bio_issue_init
  block: Hold invalidate_lock in BLKRESETZONE ioctl
  blk-mq: rename blk_attempt_bio_merge
  blk-mq: don't grab ->q_usage_counter in blk_mq_sched_bio_merge
  block: fix kerneldoc for disk_register_independent_access__ranges()
  block: add __must_check for *add_disk*() callers
  block: use enum type for blk_mq_alloc_data->rq_flags
  block: Hold invalidate_lock in BLKZEROOUT ioctl
  block: Hold invalidate_lock in BLKDISCARD ioctl
parents 2b7196a2 b637108a
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -809,10 +809,8 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
	if (blk_throtl_bio(bio))
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);
+2 −2
Original line number Diff line number Diff line
@@ -104,8 +104,8 @@ static struct kobj_type blk_ia_ranges_ktype = {
};

/**
 * disk_register_ia_ranges - register with sysfs a set of independent
 *			    access ranges
 * disk_register_independent_access_ranges - register with sysfs a set of
 *		independent access ranges
 * @disk:	Target disk
 * @new_iars:	New set of independent access ranges
 *
+0 −4
Original line number Diff line number Diff line
@@ -370,9 +370,6 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
	bool ret = false;
	enum hctx_type type;

	if (bio_queue_enter(bio))
		return false;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
@@ -397,7 +394,6 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,

	spin_unlock(&ctx->lock);
out_put:
	blk_queue_exit(q);
	return ret;
}

+35 −12
Original line number Diff line number Diff line
@@ -2495,8 +2495,9 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
	return BLK_MAX_REQUEST_COUNT;
}

static bool blk_attempt_bio_merge(struct request_queue *q, struct bio *bio,
				  unsigned int nr_segs, bool *same_queue_rq)
static bool blk_mq_attempt_bio_merge(struct request_queue *q,
				     struct bio *bio, unsigned int nr_segs,
				     bool *same_queue_rq)
{
	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
		if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
@@ -2520,12 +2521,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
	};
	struct request *rq;

	if (unlikely(bio_queue_enter(bio)))
	if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
		return NULL;
	if (unlikely(!submit_bio_checks(bio)))
		goto put_exit;
	if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
		goto put_exit;

	rq_qos_throttle(q, bio);

@@ -2542,26 +2539,44 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
	rq_qos_cleanup(q, bio);
	if (bio->bi_opf & REQ_NOWAIT)
		bio_wouldblock_error(bio);
put_exit:
	blk_queue_exit(q);

	return NULL;
}

/*
 * Decide whether a request cached on the plug list can be reused for @bio.
 *
 * Reuse is only safe when the bio would map to the same hardware-queue
 * type the cached request was allocated from, and when the cached request
 * and the bio agree on flush-ness (flush/FUA requests follow a separate
 * completion path, so a plain cached request cannot service them, and
 * vice versa).
 *
 * Returns true if @rq may be reused for @bio, false if the caller must
 * fall back to allocating a fresh request.
 */
static inline bool blk_mq_can_use_cached_rq(struct request *rq,
		struct bio *bio)
{
	/* hctx type is derived from the bio's op flags; must match rq's hctx */
	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
		return false;

	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
		return false;

	return true;
}

static inline struct request *blk_mq_get_request(struct request_queue *q,
						 struct blk_plug *plug,
						 struct bio *bio,
						 unsigned int nsegs,
						 bool *same_queue_rq)
{
	if (plug) {
	struct request *rq;
	bool checked = false;

	if (plug) {

		rq = rq_list_peek(&plug->cached_rq);
		if (rq && rq->q == q) {
			if (unlikely(!submit_bio_checks(bio)))
				return NULL;
			if (blk_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
			if (blk_mq_attempt_bio_merge(q, bio, nsegs,
						same_queue_rq))
				return NULL;
			checked = true;
			if (!blk_mq_can_use_cached_rq(rq, bio))
				goto fallback;
			rq->cmd_flags = bio->bi_opf;
			plug->cached_rq = rq_list_next(rq);
			INIT_LIST_HEAD(&rq->queuelist);
			rq_qos_throttle(q, bio);
@@ -2569,7 +2584,15 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
		}
	}

	return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
fallback:
	if (unlikely(bio_queue_enter(bio)))
		return NULL;
	if (!checked && !submit_bio_checks(bio))
		return NULL;
	rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
	if (!rq)
		blk_queue_exit(q);
	return rq;
}

/**
+16 −12
Original line number Diff line number Diff line
@@ -89,15 +89,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

@@ -108,8 +100,20 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

	return ctx->hctxs[type];
/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue cpu ctx
 *
 * Derives the hctx type from @flags and returns the hardware queue
 * bound to that type for @ctx's CPU.
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(flags)];
}

/*
@@ -149,7 +153,7 @@ struct blk_mq_alloc_data {
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;
	unsigned int rq_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
Loading