Commit 890a2fb0 authored by Jens Axboe
Browse files

Merge tag 'nvme-6.3-2023-03-16' of git://git.infradead.org/nvme into block-6.3

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 6.3

 - avoid potential UAF in nvmet_req_complete (Damien Le Moal)
 - more quirks (Elmer Miroslav Mosher Golovin, Philipp Geulen)
 - fix a memory leak in the nvme-pci probe teardown path (Irvin Cote)
 - repair the MAINTAINERS entry (Lukas Bulwahn)
 - fix handling single range discard request (Ming Lei)
 - show more opcode names in trace events (Minwoo Im)
 - fix nvme-tcp timeout reporting (Sagi Grimberg)"

* tag 'nvme-6.3-2023-03-16' of git://git.infradead.org/nvme:
  nvmet: avoid potential UAF in nvmet_req_complete()
  nvme-trace: show more opcode names
  nvme-tcp: add nvme-tcp pdu size build protection
  nvme-tcp: fix opcode reporting in the timeout handler
  nvme-pci: add NVME_QUIRK_BOGUS_NID for Lexar NM620
  nvme-pci: add NVME_QUIRK_BOGUS_NID for Netac NV3000
  nvme-pci: fixing memory leak in probe teardown path
  nvme: fix handling single range discard request
  MAINTAINERS: repair malformed T: entries in NVM EXPRESS DRIVERS
parents 23e5b930 6173a77b
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -14872,12 +14872,12 @@ M: Sagi Grimberg <sagi@grimberg.me>
L:	linux-nvme@lists.infradead.org
S:	Supported
W:	http://git.infradead.org/nvme.git
T:	git://git.infradead.org/nvme.git
T:	git git://git.infradead.org/nvme.git
F:	Documentation/nvme/
F:	drivers/nvme/host/
F:	drivers/nvme/common/
F:	include/linux/nvme.h
F:	drivers/nvme/host/
F:	include/linux/nvme-*.h
F:	include/linux/nvme.h
F:	include/uapi/linux/nvme_ioctl.h
NVM EXPRESS FABRICS AUTHENTICATION
@@ -14912,7 +14912,7 @@ M: Chaitanya Kulkarni <kch@nvidia.com>
L:	linux-nvme@lists.infradead.org
S:	Supported
W:	http://git.infradead.org/nvme.git
T:	git://git.infradead.org/nvme.git
T:	git git://git.infradead.org/nvme.git
F:	drivers/nvme/target/
NVMEM FRAMEWORK
+19 −9
Original line number Diff line number Diff line
@@ -781,6 +781,15 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		range = page_address(ns->ctrl->discard_page);
	}

	if (queue_max_discard_segments(req->q) == 1) {
		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);

		range[0].cattr = cpu_to_le32(0);
		range[0].nlb = cpu_to_le32(nlb);
		range[0].slba = cpu_to_le64(slba);
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
@@ -792,6 +801,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
			}
			n++;
		}
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
+5 −0
Original line number Diff line number Diff line
@@ -3073,6 +3073,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	nvme_dev_unmap(dev);
out_uninit_ctrl:
	nvme_uninit_ctrl(&dev->ctrl);
	nvme_put_ctrl(&dev->ctrl);
	return result;
}

@@ -3415,6 +3416,8 @@ static const struct pci_device_id nvme_id_table[] = {
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x2646, 0x501E),   /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
	{ PCI_DEVICE(0x1f40, 0x1202),   /* Netac Technologies Co. NV3000 NVMe SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1f40, 0x5236),   /* Netac Technologies Co. NV7000 NVMe SSD */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1e4B, 0x1001),   /* MAXIO MAP1001 */
@@ -3435,6 +3438,8 @@ static const struct pci_device_id nvme_id_table[] = {
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+27 −6
Original line number Diff line number Diff line
@@ -208,6 +208,18 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
{
	return req->pdu;
}

static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
{
	/* use the pdu space in the back for the data pdu */
	return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
		sizeof(struct nvme_tcp_data_pdu);
}

static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
	if (nvme_is_fabrics(req->req.cmd))
@@ -614,7 +626,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,

static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_data_pdu *data = req->pdu;
	struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
	struct nvme_tcp_queue *queue = req->queue;
	struct request *rq = blk_mq_rq_from_pdu(req);
	u32 h2cdata_sent = req->pdu_len;
@@ -1038,7 +1050,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	bool inline_data = nvme_tcp_has_inline_data(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) + hdgst - req->offset;
@@ -1077,7 +1089,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
{
	struct nvme_tcp_queue *queue = req->queue;
	struct nvme_tcp_data_pdu *pdu = req->pdu;
	struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
	u8 hdgst = nvme_tcp_hdgst_len(queue);
	int len = sizeof(*pdu) - req->offset + hdgst;
	int ret;
@@ -2284,7 +2296,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
	int qid = nvme_tcp_queue_id(req->queue);

@@ -2323,7 +2335,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -2343,7 +2355,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;
@@ -2682,6 +2694,15 @@ static struct nvmf_transport_ops nvme_tcp_transport = {

static int __init nvme_tcp_init_module(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
+3 −1
Original line number Diff line number Diff line
@@ -756,8 +756,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_sq *sq = req->sq;

	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
	percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

Loading