Commit 2d4185ae authored by Zhengchao Shao's avatar Zhengchao Shao Committed by Yongqiang Liu
Browse files

crypto: hisilicon/sec - don't sleep when in softirq

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5GEVR


CVE: NA

--------------------------------

When the kunpeng920 encryption driver is used to encrypt and decrypt
packets during the softirq, it is not allowed to use mutex lock. The
kernel will report the following error:

BUG: scheduling while atomic: swapper/57/0/0x00000300
Call trace:
dump_backtrace+0x0/0x1e4
show_stack+0x20/0x2c
dump_stack+0xd8/0x140
__schedule_bug+0x68/0x80
__schedule+0x728/0x840
schedule+0x50/0xe0
schedule_preempt_disabled+0x18/0x24
__mutex_lock.constprop.0+0x594/0x5dc
__mutex_lock_slowpath+0x1c/0x30
mutex_lock+0x50/0x60
sec_request_init+0x8c/0x1a0 [hisi_sec2]
sec_process+0x28/0x1ac [hisi_sec2]
sec_skcipher_crypto+0xf4/0x1d4 [hisi_sec2]
sec_skcipher_encrypt+0x1c/0x30 [hisi_sec2]
crypto_skcipher_encrypt+0x2c/0x40
crypto_authenc_encrypt+0xc8/0xfc [authenc]
crypto_aead_encrypt+0x2c/0x40
echainiv_encrypt+0x144/0x1a0 [echainiv]
crypto_aead_encrypt+0x2c/0x40
esp_output_tail+0x348/0x5c0 [esp4]
esp_output+0x120/0x19c [esp4]
xfrm_output_one+0x25c/0x4d4
xfrm_output_resume+0x6c/0x1fc
xfrm_output+0xac/0x3c0
xfrm4_output+0x64/0x130
ip_build_and_send_pkt+0x158/0x20c
tcp_v4_send_synack+0xdc/0x1f0
tcp_conn_request+0x7d0/0x994
tcp_v4_conn_request+0x58/0x6c
tcp_v6_conn_request+0xf0/0x100
tcp_rcv_state_process+0x1cc/0xd60
tcp_v4_do_rcv+0x10c/0x250
tcp_v4_rcv+0xfc4/0x10a4
ip_protocol_deliver_rcu+0xf4/0x200
ip_local_deliver_finish+0x58/0x70
ip_local_deliver+0x68/0x120
ip_sublist_rcv_finish+0x70/0x94
ip_list_rcv_finish.constprop.0+0x17c/0x1d0
ip_sublist_rcv+0x40/0xb0
ip_list_rcv+0x140/0x1dc
__netif_receive_skb_list_core+0x154/0x28c
__netif_receive_skb_list+0x120/0x1a0
netif_receive_skb_list_internal+0xe4/0x1f0
napi_complete_done+0x70/0x1f0
gro_cell_poll+0x9c/0xb0
napi_poll+0xcc/0x264
net_rx_action+0xd4/0x21c
__do_softirq+0x130/0x358
irq_exit+0x11c/0x13c
__handle_domain_irq+0x88/0xf0
gic_handle_irq+0x78/0x2c0
el1_irq+0xb8/0x140
arch_cpu_idle+0x18/0x40
default_idle_call+0x5c/0x1c0
cpuidle_idle_call+0x174/0x1b0
do_idle+0xc8/0x160
cpu_startup_entry+0x30/0x11c
secondary_start_kernel+0x158/0x1e4
softirq: huh, entered softirq 3 NET_RX 0000000093774ee4 with
preempt_count 00000100, exited with fffffe00?

Fixes: 416d8220 ("crypto: hisilicon - add HiSilicon SEC V2 driver")
Signed-off-by: default avatarZhengchao Shao <shaozhengchao@huawei.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
Reviewed-by: default avatarYue Haibing <yuehaibing@huawei.com>
Reviewed-by: default avatarWei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: default avatarYongqiang Liu <liuyongqiang13@huawei.com>
parent ffc9dfba
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -88,7 +88,7 @@ struct sec_qp_ctx {
	struct idr req_idr;
	struct sec_alg_res res[QM_Q_DEPTH];
	struct sec_ctx *ctx;
	struct mutex req_lock;
	spinlock_t req_lock;
	struct list_head backlog;
	struct hisi_acc_sgl_pool *c_in_pool;
	struct hisi_acc_sgl_pool *c_out_pool;
+10 −10
Original line number Diff line number Diff line
@@ -87,11 +87,11 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);
	spin_lock_bh(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	spin_unlock_bh(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
@@ -115,9 +115,9 @@ static void sec_free_req_id(struct sec_req *req)
	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	spin_lock_bh(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
	spin_unlock_bh(&qp_ctx->req_lock);
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
@@ -176,17 +176,17 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	spin_lock_bh(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);
	spin_unlock_bh(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;
@@ -320,7 +320,7 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type)
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

@@ -837,7 +837,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	spin_lock_bh(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
@@ -845,7 +845,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);
	spin_unlock_bh(&qp_ctx->req_lock);

	return backlog_req;
}