Commit cb2369cb authored by Weili Qian's avatar Weili Qian Committed by JiangShui
Browse files

crypto: hisilicon/sec2: fix memory use-after-free issue

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8LFYK


CVE: NA

----------------------------------------------------------------------

When the number of packets being processed in the hardware
queue is greater than 512, the SEC driver continues to send the
packet to the hardware, but adds the packet to the backlog list.
Then, the SEC driver returns -EBUSY to the caller, and the caller
stops sending packets. When the number of packets in the queue, as queried
in the callback, is less than 512, the packet-sending thread is woken up.

When the number of packet-sending threads is greater than 512, packets in the
backlog may already be completed but are not deleted from the list. The
released memory is then accessed during the deletion, causing a system panic.

Therefore, remove the backlog list entirely, determine whether the
packet-sending thread needs to be woken up based on 'fake_busy' in the
sec_req, and invoke the user's callback function to ensure that the thread
is woken up before the req memory is released.

The log looks like:
[ 169.430697][ T1354] CPU: 27 PID: 1354 Comm: kworker/u262:1 Kdump: loaded Not tainted 5.10.0+ #1
[ 169.439678][ T1354] Hardware name: Huawei TaiShan 200 (Model 2280)/BC82AMDD, BIOS 2280-V2 CS V5.B211.01 11/10/2021
[ 169.450421][ T1354] Workqueue: 0000:76:00.0 qm_work_process [hisi_qm]
[ 169.457050][ T1354] Call trace:
[ 169.460285][ T1354] dump_backtrace+0x0/0x300
[ 169.464780][ T1354] show_stack+0x20/0x30
[ 169.468936][ T1354] dump_stack+0x104/0x17c
[ 169.473240][ T1354] print_address_description.constprop.0+0x68/0x204
[ 169.479889][ T1354] __kasan_report+0xe0/0x140
[ 169.484463][ T1354] kasan_report+0x44/0xe0
[ 169.488771][ T1354] __asan_load8+0x94/0xd0
[ 169.493088][ T1354] __list_del_entry_valid+0x20/0x180
[ 169.498408][ T1354] sec_back_req_clear+0x184/0x2dc [hisi_sec2]
[ 169.504494][ T1354] sec_skcipher_callback+0x84/0x150 [hisi_sec2]
[ 169.510800][ T1354] sec_req_cb+0x1d4/0x334 [hisi_sec2]
[ 169.516227][ T1354] qm_poll_req_cb+0x170/0x20c [hisi_qm]
[ 169.524821][ T1354] qm_work_process+0xf8/0x124 [hisi_qm]
[ 169.533436][ T1354] process_one_work+0x3a8/0x860
[ 169.541063][ T1354] worker_thread+0x280/0x670
[ 169.548349][ T1354] kthread+0x18c/0x1d0
[ 169.555169][ T1354] ret_from_fork+0x10/0x18
[ 169.562107][ T1354]

Signed-off-by: default avatarWeili Qian <qianweili@huawei.com>
Signed-off-by: default avatarJiangShui Yang <yangjiangshui@h-partners.com>
parent 39014ae9
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -55,7 +55,6 @@ struct sec_req {
	dma_addr_t in_dma;
	struct sec_cipher_req c_req;
	struct sec_aead_req aead_req;
	struct list_head backlog_head;

	int err_type;
	int req_id;
@@ -121,7 +120,6 @@ struct sec_qp_ctx {
	struct sec_alg_res *res;
	struct sec_ctx *ctx;
	spinlock_t req_lock;
	struct list_head backlog;
	struct hisi_acc_sgl_pool *c_in_pool;
	struct hisi_acc_sgl_pool *c_out_pool;
};
+8 −43
Original line number Diff line number Diff line
@@ -288,10 +288,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		req->fake_busy = true;
		spin_unlock_bh(&qp_ctx->req_lock);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);
@@ -557,7 +557,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
	if (ret)
@@ -1399,31 +1398,10 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
	}
}

static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
				struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

@@ -1432,14 +1410,8 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
	if (req->fake_busy) {
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

@@ -1685,9 +1657,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
@@ -1709,14 +1678,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
	if (req->fake_busy) {
		a_req->base.complete(&a_req->base, -EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

@@ -2104,6 +2067,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->fake_busy = false;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
@@ -2385,6 +2349,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->fake_busy = false;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {