Commit 75c523ac authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge tag 'nvme-6.0-2022-09-08' of git://git.infradead.org/nvme into block-6.0

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 6.0

 - fix a use-after-free in nvmet (Bart Van Assche)
 - fix a use-after-free when detecting digest errors (Sagi Grimberg)
 - fix regression that causes sporadic TCP requests to time out
   (Sagi Grimberg)
 - fix two off-by-one errors in the nvmet ZNS support
   (Dennis Maisenbacher)
 - requeue aen after firmware activation (Keith Busch)"

* tag 'nvme-6.0-2022-09-08' of git://git.infradead.org/nvme:
  nvme: requeue aen after firmware activation
  nvmet: fix mar and mor off-by-one errors
  nvme-tcp: fix regression that causes sporadic requests to time out
  nvme-tcp: fix UAF when detecting digest errors
  nvmet: fix a use-after-free
parents 748008e1 371a982c
Loading
Loading
Loading
Loading
+11 −3
Original line number Diff line number Diff line
@@ -4702,6 +4702,8 @@ static void nvme_fw_act_work(struct work_struct *work)
	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

static u32 nvme_aer_type(u32 result)
@@ -4714,9 +4716,10 @@ static u32 nvme_aer_subtype(u32 result)
	return (result & 0xff00) >> 8;
}

static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
	u32 aer_notice_type = nvme_aer_subtype(result);
	bool requeue = true;

	trace_nvme_async_event(ctrl, aer_notice_type);

@@ -4733,6 +4736,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
		 */
		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
			nvme_auth_stop(ctrl);
			requeue = false;
			queue_work(nvme_wq, &ctrl->fw_act_work);
		}
		break;
@@ -4749,6 +4753,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
	return requeue;
}

static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
@@ -4764,13 +4769,14 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
	u32 result = le32_to_cpu(res->u32);
	u32 aer_type = nvme_aer_type(result);
	u32 aer_subtype = nvme_aer_subtype(result);
	bool requeue = true;

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	switch (aer_type) {
	case NVME_AER_NOTICE:
		nvme_handle_aen_notice(ctrl, result);
		requeue = nvme_handle_aen_notice(ctrl, result);
		break;
	case NVME_AER_ERROR:
		/*
@@ -4791,6 +4797,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
	default:
		break;
	}

	if (requeue)
		queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+2 −5
Original line number Diff line number Diff line
@@ -121,7 +121,6 @@ struct nvme_tcp_queue {
	struct mutex		send_mutex;
	struct llist_head	req_list;
	struct list_head	send_list;
	bool			more_requests;

	/* recv state */
	void			*pdu;
@@ -320,7 +319,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
		!llist_empty(&queue->req_list);
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -339,9 +338,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
	 */
	if (queue->io_cpu == raw_smp_processor_id() &&
	    sync && empty && mutex_trylock(&queue->send_mutex)) {
		queue->more_requests = !last;
		nvme_tcp_send_all(queue);
		queue->more_requests = false;
		mutex_unlock(&queue->send_mutex);
	}

@@ -1229,7 +1226,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
		else if (unlikely(result < 0))
			return;

		if (!pending)
		if (!pending || !queue->rd_enabled)
			return;

	} while (!time_after(jiffies, deadline)); /* quota is exhausted */
+4 −2
Original line number Diff line number Diff line
@@ -735,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_ns *ns = req->ns;

	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -745,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)

	trace_nvmet_req_complete(req);

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
+15 −2
Original line number Diff line number Diff line
@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
	struct nvme_id_ns_zns *id_zns;
	u64 zsze;
	u16 status;
	u32 mar, mor;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -130,8 +131,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
	id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
	id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));

	mor = bdev_max_open_zones(req->ns->bdev);
	if (!mor)
		mor = U32_MAX;
	else
		mor--;
	id_zns->mor = cpu_to_le32(mor);

	mar = bdev_max_active_zones(req->ns->bdev);
	if (!mar)
		mar = U32_MAX;
	else
		mar--;
	id_zns->mar = cpu_to_le32(mar);

done:
	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));