Commit 7d47c655 authored by wangyuan's avatar wangyuan
Browse files

crypto: hisilicon/sec2: fix memory use-after-free issue

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8WCJM


CVE: NA

--------------------------------------------------------

When the number of packets being processed in the hardware
queue is greater than 512, the SEC driver continues to send
the packet to the hardware, but adds the packet to the backlog
list. Then, the SEC driver returns -EBUSY to the caller, and
the caller stops sending packets. When the number of packets
in the queue, as queried in the callback, drops below 512, the
packet-sending thread is woken up.

When the number of packet-sending threads is greater than 512,
packets in the backlog may already be complete but not yet
deleted from the list. The released memory is then accessed
during the deletion, causing a system panic.

Therefore, delete the backlog, determine whether the packet
sending thread needs to be woken up based on 'fake_busy' in
the sec_req, and then invoke the callback function of the user
to ensure that the thread is woken up before releasing the req
memory.

The log looks like:
[ 169.457050][ T1354] Call trace:
[ 169.460285][ T1354] dump_backtrace+0x0/0x300
[ 169.464780][ T1354] show_stack+0x20/0x30
[ 169.468936][ T1354] dump_stack+0x104/0x17c
[ 169.473240][ T1354] print_address_description.constprop.0+0x68/0x204
[ 169.479889][ T1354] __kasan_report+0xe0/0x140
[ 169.484463][ T1354] kasan_report+0x44/0xe0
[ 169.488771][ T1354] __asan_load8+0x94/0xd0
[ 169.493088][ T1354] __list_del_entry_valid+0x20/0x180
[ 169.498408][ T1354] sec_back_req_clear+0x184/0x2dc [hisi_sec2]
[ 169.504494][ T1354] sec_skcipher_callback+0x84/0x150 [hisi_sec2]
[ 169.510800][ T1354] sec_req_cb+0x1d4/0x334 [hisi_sec2]
[ 169.516227][ T1354] qm_poll_req_cb+0x170/0x20c [hisi_qm]
[ 169.524821][ T1354] qm_work_process+0xf8/0x124 [hisi_qm]
[ 169.533436][ T1354] process_one_work+0x3a8/0x860
[ 169.541063][ T1354] worker_thread+0x280/0x670
[ 169.548349][ T1354] kthread+0x18c/0x1d0
[ 169.555169][ T1354] ret_from_fork+0x10/0x18
[ 169.562107][ T1354]

Signed-off-by: default avatarwangyuan <wangyuan46@huawei.com>
parent 1f098345
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -39,7 +39,6 @@ struct sec_req {
	struct sec_qp_ctx *qp_ctx;

	struct sec_cipher_req c_req;
	struct list_head backlog_head;

	int err_type;
	int req_id;
@@ -89,7 +88,6 @@ struct sec_qp_ctx {
	struct sec_alg_res res[QM_Q_DEPTH];
	struct sec_ctx *ctx;
	spinlock_t req_lock;
	struct list_head backlog;
	struct hisi_acc_sgl_pool *c_in_pool;
	struct hisi_acc_sgl_pool *c_out_pool;
};
+5 −32
Original line number Diff line number Diff line
@@ -180,10 +180,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		req->fake_busy = true;
		spin_unlock_bh(&qp_ctx->req_lock);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);
@@ -322,7 +322,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type)

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
@@ -832,31 +831,10 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

/*
 * Pop one request off the qp_ctx backlog, if the hardware queue has
 * drained below the fake-busy threshold.
 *
 * Returns the dequeued request, or NULL when the queue is still too
 * full or the backlog is empty.
 *
 * NOTE(review): this is the function removed by this commit — the
 * backlog entry could be freed by a concurrent completion before
 * list_del() runs here, which is the use-after-free being fixed.
 */
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
				struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	/* req_lock serializes against sec_bd_send() adding to the backlog */
	spin_lock_bh(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		/* FIFO order: resume the oldest backlogged request first */
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

@@ -864,14 +842,8 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
	if (req->fake_busy) {
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

@@ -1017,6 +989,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->fake_busy = false;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))