Unverified Commit deb75da8 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4006 crypto: hisilicon/sec2: fix memory use-after-free issue

parents 1f098345 7d47c655
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -39,7 +39,6 @@ struct sec_req {
	struct sec_qp_ctx *qp_ctx;

	struct sec_cipher_req c_req;
	struct list_head backlog_head;

	int err_type;
	int req_id;
@@ -89,7 +88,6 @@ struct sec_qp_ctx {
	struct sec_alg_res res[QM_Q_DEPTH];
	struct sec_ctx *ctx;
	spinlock_t req_lock;
	struct list_head backlog;
	struct hisi_acc_sgl_pool *c_in_pool;
	struct hisi_acc_sgl_pool *c_out_pool;
};
+5 −32
Original line number Diff line number Diff line
@@ -180,10 +180,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		req->fake_busy = true;
		spin_unlock_bh(&qp_ctx->req_lock);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);
@@ -322,7 +322,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type)

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
@@ -832,31 +831,10 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

/*
 * sec_back_req_clear() - dequeue one backlogged request, if the queue has
 * drained enough to accept it.
 *
 * Returns the first request on @qp_ctx->backlog when the hardware queue's
 * in-flight count has fallen to or below @ctx->fake_req_limit, or NULL when
 * either the queue is still too busy or the backlog is empty.
 *
 * NOTE(review): callers loop on this until NULL (see the diff context in
 * sec_skcipher_callback), re-completing each dequeued request with
 * -EINPROGRESS; the commit this hunk belongs to removes this backlog
 * mechanism to fix a use-after-free.
 */
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
				struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	/* Serializes against sec_bd_send(), which appends to the backlog
	 * under the same qp_ctx->req_lock.
	 */
	spin_lock_bh(&qp_ctx->req_lock);
	/* Only hand back a request once the in-flight count is at or under
	 * the fake-busy threshold and something is actually queued.
	 */
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

@@ -864,14 +842,8 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
	if (req->fake_busy) {
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

@@ -1017,6 +989,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->fake_busy = false;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))