Commit eb336f0c authored by Chao Leng's avatar Chao Leng Committed by Zheng Zengkai
Browse files

nvme-tcp: avoid request double completion for concurrent nvme_tcp_timeout



stable inclusion
from stable-5.10.14
commit 82ae0714c344ac707900aacd9805773474059282
bugzilla: 48051

--------------------------------

[ Upstream commit 9ebbfe49 ]

Each namespace has a request queue. If completing requests takes a long
time, multiple request queues may have timed-out requests at the same
time, so nvme_tcp_timeout can execute concurrently. Requests from
different request queues may be queued in the same tcp queue, so
multiple nvme_tcp_timeout calls may invoke nvme_tcp_stop_queue at the
same time.
The first nvme_tcp_stop_queue will clear NVME_TCP_Q_LIVE and continue
stopping the tcp queue (canceling io_work), but the others see that
NVME_TCP_Q_LIVE is already cleared and then directly complete the
requests. Completing requests before the io work is completely canceled
may lead to a use-after-free condition.
Add a mutex lock to serialize nvme_tcp_stop_queue.

Signed-off-by: Chao Leng <lengchao@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
parent 1181a462
Loading
Loading
Loading
Loading
+10 −4
Original line number Diff line number Diff line
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
	struct work_struct	io_work;
	int			io_cpu;

	struct mutex		queue_lock;
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
@@ -1219,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)

	sock_release(queue->sock);
	kfree(queue->pdu);
	mutex_destroy(&queue->queue_lock);
}

static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1380,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
	int ret, rcv_pdu_size;

	mutex_init(&queue->queue_lock);
	queue->ctrl = ctrl;
	init_llist_head(&queue->req_list);
	INIT_LIST_HEAD(&queue->send_list);
@@ -1398,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
	if (ret) {
		dev_err(nctrl->device,
			"failed to create socket: %d\n", ret);
		return ret;
		goto err_destroy_mutex;
	}

	/* Single syn retry */
@@ -1507,6 +1510,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
err_sock:
	sock_release(queue->sock);
	queue->sock = NULL;
err_destroy_mutex:
	mutex_destroy(&queue->queue_lock);
	return ret;
}

@@ -1534,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[qid];

	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return;
	mutex_lock(&queue->queue_lock);
	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
		__nvme_tcp_stop_queue(queue);
	mutex_unlock(&queue->queue_lock);
}

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)