Commit 579c6f13 authored by Yu Kuai, committed by Zheng Zengkai
Browse files

blk-mq: don't access request_wrapper if request is not allocated from block layer

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I65K8D


CVE: NA

--------------------------------

request_wrapper is used to fix kabi broken for request, it's only for
internal use. This patch makes sure that out-of-tree drivers won't access
request_wrapper if request is not managed by block layer.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent a4814b31
Loading
Loading
Loading
Loading
+27 −22
Original line number Diff line number Diff line
@@ -1304,6 +1304,32 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
	}
}

static void blk_account_io_latency(struct request *req, u64 now, const int sgrp)
{
	u64 stat_time;
	struct request_wrapper *rq_wrapper;

	if (!IS_ENABLED(CONFIG_64BIT) || !(req->rq_flags & RQF_FROM_BLOCK)) {
		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
		return;
	}

	rq_wrapper = request_to_wrapper(req);
	stat_time = READ_ONCE(rq_wrapper->stat_time_ns);
	/*
	 * This might fail if 'stat_time_ns' is updated
	 * in blk_mq_check_inflight_with_stat().
	 */
	if (likely(now > stat_time &&
		   cmpxchg64(&rq_wrapper->stat_time_ns, stat_time, now)
		   == stat_time)) {
		u64 duration = stat_time ? now - stat_time :
			now - req->start_time_ns;

		part_stat_add(req->part, nsecs[sgrp], duration);
	}
}

void blk_account_io_done(struct request *req, u64 now)
{
	/*
@@ -1315,36 +1341,15 @@ void blk_account_io_done(struct request *req, u64 now)
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;
#ifdef CONFIG_64BIT
		u64 stat_time;
		struct request_wrapper *rq_wrapper = request_to_wrapper(req);
#endif

		part_stat_lock();
		part = req->part;
		update_io_ticks(part, jiffies, true);
		part_stat_inc(part, ios[sgrp]);
#ifdef CONFIG_64BIT
		stat_time = READ_ONCE(rq_wrapper->stat_time_ns);
		/*
		 * This might fail if 'stat_time_ns' is updated
		 * in blk_mq_check_inflight_with_stat().
		 */
		if (likely(now > stat_time &&
			   cmpxchg64(&rq_wrapper->stat_time_ns, stat_time, now)
			   == stat_time)) {
			u64 duation = stat_time ? now - stat_time :
				now - req->start_time_ns;

			part_stat_add(req->part, nsecs[sgrp], duation);
		}
#else
		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
#endif
		blk_account_io_latency(req, now, sgrp);
		if (precise_iostat)
			part_stat_local_dec(part, in_flight[rq_data_dir(req)]);
		part_stat_unlock();

		hd_struct_put(part);
	}
}
+1 −1
Original line number Diff line number Diff line
@@ -333,7 +333,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_flags |= RQF_FLUSH_SEQ | RQF_FROM_BLOCK;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;
	/*
+3 −2
Original line number Diff line number Diff line
@@ -360,8 +360,9 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	blk_flags_show(m,
		       (__force unsigned int)(rq->rq_flags & ~RQF_FROM_BLOCK),
		       rqf_name, ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
+2 −3
Original line number Diff line number Diff line
@@ -115,9 +115,8 @@ static bool blk_mq_check_inflight_with_stat(struct blk_mq_hw_ctx *hctx,
		struct request_wrapper *rq_wrapper;

		mi->inflight[rq_data_dir(rq)]++;
		if (!rq->part)
		if (!rq->part || !(rq->rq_flags & RQF_FROM_BLOCK))
			return true;

		/*
		 * If the request is started after 'part->stat_time' is set,
		 * don't update 'nsecs' here.
@@ -375,7 +374,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->mq_hctx = data->hctx;
	rq->rq_flags = 0;
	rq->rq_flags = RQF_FROM_BLOCK;
	rq->cmd_flags = data->cmd_flags;
	if (data->flags & BLK_MQ_REQ_PM)
		rq->rq_flags |= RQF_PM;
+2 −0
Original line number Diff line number Diff line
@@ -115,6 +115,8 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
/* The rq is allocated from block layer */
#define RQF_FROM_BLOCK		((__force req_flags_t)(1 << 22))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \