Commit 455944e4 authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge tag 'nvme-6.3-2023-02-07' of git://git.infradead.org/nvme into for-6.3/block

Pull NVMe updates from Christoph:

"nvme updates for Linux 6.3

 - small improvements to the logging functionality (Amit Engel)
 - authentication cleanups (Hannes Reinecke)
 - cleanup and optimize the DMA mapping code in the PCIe driver
   (Keith Busch)
 - work around the command effects for Format NVM (Keith Busch)
 - misc cleanups (Keith Busch, Christoph Hellwig)"

* tag 'nvme-6.3-2023-02-07' of git://git.infradead.org/nvme:
  nvme: mask CSE effects for security receive
  nvme: always initialize known command effects
  nvmet: for nvme admin set_features cmd, call nvmet_check_data_len_lte()
  nvme-tcp: add additional info for nvme_tcp_timeout log
  nvme: add nvme_opcode_str function for all nvme cmd types
  nvme: remove nvme_execute_passthru_rq
  nvme-pci: place descriptor addresses in iod
  nvme-pci: use mapped entries for sgl decision
  nvme-pci: remove SGL segment descriptors
  nvme-auth: don't use NVMe status codes
  nvme-fabrics: clarify AUTHREQ result handling
parents 1972d038 baff6491
Loading
Loading
Loading
Loading
+15 −15
Original line number Diff line number Diff line
@@ -158,7 +158,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return NVME_SC_INVALID_FIELD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
@@ -167,7 +167,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return NVME_SC_INVALID_FIELD;
		return -EPROTO;
	}

	if (chap->hash_id == data->hashid && chap->shash_tfm &&
@@ -193,7 +193,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return NVME_SC_AUTH_REQUIRED;
		return -ENOMEM;
	}

	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
@@ -203,7 +203,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		crypto_free_shash(chap->shash_tfm);
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return NVME_SC_AUTH_REQUIRED;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
@@ -219,7 +219,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return NVME_SC_AUTH_REQUIRED;
		return -EPROTO;
	}

	if (chap->dhgroup_id == data->dhgid &&
@@ -242,7 +242,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return NVME_SC_INVALID_FIELD;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
@@ -254,7 +254,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return NVME_SC_AUTH_REQUIRED;
			return -ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
@@ -263,7 +263,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return NVME_SC_INVALID_FIELD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

@@ -274,7 +274,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return NVME_SC_AUTH_REQUIRED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
@@ -344,7 +344,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return NVME_SC_INVALID_FIELD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
@@ -352,7 +352,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return NVME_SC_INVALID_FIELD;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
@@ -376,7 +376,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return NVME_SC_AUTH_REQUIRED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
@@ -730,7 +730,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = NVME_SC_AUTH_REQUIRED;
		chap->error = -ECONNREFUSED;
		return;
	}

@@ -798,7 +798,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = NVME_SC_AUTH_REQUIRED;
		chap->error = -ECONNREFUSED;
		return;
	}

@@ -819,7 +819,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = NVME_SC_AUTH_REQUIRED;
		chap->error = -ECONNREFUSED;
		goto fail2;
	}

+16 −0
Original line number Diff line number Diff line
@@ -54,6 +54,14 @@ static const char * const nvme_admin_ops[] = {
	[nvme_admin_get_lba_status] = "Get LBA Status",
};

/* Human-readable names for NVMe fabrics command types, indexed by type. */
static const char * const nvme_fabrics_ops[] = {
	[nvme_fabrics_type_property_set] = "Property Set",
	[nvme_fabrics_type_property_get] = "Property Get",
	[nvme_fabrics_type_connect] = "Connect",
	[nvme_fabrics_type_auth_send] = "Authentication Send",
	[nvme_fabrics_type_auth_receive] = "Authentication Receive",
};

static const char * const nvme_statuses[] = {
	[NVME_SC_SUCCESS] = "Success",
	[NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
@@ -185,3 +193,11 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
		return nvme_admin_ops[opcode];
	return "Unknown";
}
EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);

/*
 * Translate a fabrics command type into a human-readable name for logging.
 * Returns "Unknown" for any type not present in nvme_fabrics_ops.
 */
const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
{
	if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
		return nvme_fabrics_ops[opcode];
	return "Unknown";
}
EXPORT_SYMBOL_GPL(nvme_get_fabrics_opcode_str);
+66 −53
Original line number Diff line number Diff line
@@ -1002,7 +1002,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
 * >0: nvme controller's cqe status response
 * <0: kernel error in lieu of controller response
 */
static int nvme_execute_rq(struct request *rq, bool at_head)
int nvme_execute_rq(struct request *rq, bool at_head)
{
	blk_status_t status;

@@ -1013,6 +1013,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
		return nvme_req(rq)->status;
	return blk_status_to_errno(status);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
@@ -1058,41 +1059,12 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

/*
 * Baseline command effects for admin opcodes the driver knows about,
 * ORed into whatever the controller's effects log reports.
 */
static u32 nvme_known_admin_effects(u8 opcode)
{
	if (opcode == nvme_admin_format_nvm)
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
		       NVME_CMD_EFFECTS_CSE_MASK;
	if (opcode == nvme_admin_sanitize_nvm)
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	return 0;
}

/* Baseline effects for NVM I/O opcodes known to change LBA contents. */
static u32 nvme_known_nvm_effects(u8 opcode)
{
	if (opcode == nvme_cmd_write || opcode == nvme_cmd_write_zeroes ||
	    opcode == nvme_cmd_write_uncor)
		return NVME_CMD_EFFECTS_LBCC;
	return 0;
}

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
		effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (ns->head->ids.csi == NVME_CSI_NVM)
			effects |= nvme_known_nvm_effects(opcode);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn_once(ctrl->device,
				"IO command:%02x has unusual effects:%08x\n",
@@ -1105,17 +1077,14 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
		 */
		effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
	} else {
		if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
		effects |= nvme_known_admin_effects(opcode);
	}

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

@@ -1133,6 +1102,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
	}
	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);

void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
		       struct nvme_command *cmd, int status)
@@ -1174,17 +1144,6 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);

/*
 * Run the command-effects prologue for a passthrough request and execute it
 * synchronously.  On return *effects holds the command effects the caller
 * must later hand to nvme_passthru_end().  Return value follows
 * nvme_execute_rq(): 0 on success, >0 for a controller CQE status, <0 for a
 * kernel error.
 */
int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;

	*effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	return nvme_execute_rq(rq, false);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

/*
 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
 * 
@@ -3120,6 +3079,62 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
	return ret;
}

/*
 * Merge command effects the driver knows about into the cached effects log,
 * and mask off one effect the driver deliberately ignores (see the security
 * receive comment below).
 */
static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
{
	struct nvme_effects_log	*log = ctrl->effects;

	/* Format changes LBA contents and namespace capabilities, and needs
	 * exclusive command submission; Sanitize likewise minus NCC. */
	log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
						NVME_CMD_EFFECTS_NCC |
						NVME_CMD_EFFECTS_CSE_MASK);
	log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
						NVME_CMD_EFFECTS_CSE_MASK);

	/*
	 * The spec says the result of a security receive command depends on
	 * the previous security send command. As such, many vendors log this
	 * command as one to be submitted only when no other commands to the
	 * same namespace are outstanding. The intention is to tell the host
	 * to prevent mixing security send and receive.
	 *
	 * This driver can only enforce such exclusive access against IO
	 * queues, though. We are not readily able to enforce such a rule for
	 * two commands to the admin queue, which is the only queue that
	 * matters for this command.
	 *
	 * Rather than blindly freezing the IO queues for this effect that
	 * doesn't even apply to IO, mask it off.
	 */
	log->acs[nvme_admin_security_recv] &= ~NVME_CMD_EFFECTS_CSE_MASK;

	/* Writes of any flavor always change LBA contents. */
	log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
}

static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int ret = 0;

	if (ctrl->effects)
		return 0;

	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
		if (ret < 0)
			return ret;
	}

	if (!ctrl->effects) {
		ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
		if (!ctrl->effects)
			return -ENOMEM;
		xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
	}

	nvme_init_known_nvm_effects(ctrl);
	return 0;
}

static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
@@ -3133,12 +3148,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
		return -EIO;
	}

	if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
		ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
		if (ret < 0)
			goto out_free;
	}

	if (!(ctrl->ops->flags & NVME_F_FABRICS))
		ctrl->cntlid = le16_to_cpu(id->cntlid);

@@ -3161,6 +3170,10 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
		ret = nvme_init_subsystem(ctrl, id);
		if (ret)
			goto out_free;

		ret = nvme_init_effects(ctrl, id);
		if (ret)
			goto out_free;
	}
	memcpy(ctrl->subsys->firmware_rev, id->fr,
	       sizeof(ctrl->subsys->firmware_rev));
+17 −2
Original line number Diff line number Diff line
@@ -410,7 +410,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)

	result = le32_to_cpu(res.u32);
	ctrl->cntlid = result & 0xFFFF;
	if ((result >> 16) & 0x3) {
	if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
		/* Secure concatenation is not implemented */
		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
			dev_warn(ctrl->device,
				 "qid 0: secure concatenation is not supported\n");
			ret = NVME_SC_AUTH_REQUIRED;
			goto out_free_data;
		}
		/* Authentication required */
		ret = nvme_auth_negotiate(ctrl, 0);
		if (ret) {
@@ -486,7 +493,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
				       &cmd, data);
	}
	result = le32_to_cpu(res.u32);
	if ((result >> 16) & 2) {
	if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
		/* Secure concatenation is not implemented */
		if (result & NVME_CONNECT_AUTHREQ_ASCR) {
			dev_warn(ctrl->device,
				 "qid 0: secure concatenation is not supported\n");
			ret = NVME_SC_AUTH_REQUIRED;
			goto out_free_data;
		}
		/* Authentication required */
		ret = nvme_auth_negotiate(ctrl, qid);
		if (ret) {
@@ -500,6 +514,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
					 "qid %u: authentication failed\n", qid);
		}
	}
out_free_data:
	kfree(data);
	return ret;
}
+3 −2
Original line number Diff line number Diff line
@@ -219,6 +219,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u64 *result, unsigned timeout, unsigned int flags)
{
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl;
	struct request *req;
	void *meta = NULL;
@@ -241,8 +242,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
	bio = req->bio;
	ctrl = nvme_req(req)->ctrl;

	ret = nvme_execute_passthru_rq(req, &effects);

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	ret = nvme_execute_rq(req, false);
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta)
Loading