Commit d417a6ff authored by Martin K. Petersen
Browse files

Merge patch series "lpfc: Update lpfc to revision 14.2.0.14"

Justin Tee <justintee8345@gmail.com> says:

Update lpfc to revision 14.2.0.14

This patch set contains logging improvements, kref handling fixes,
discovery bug fixes, and refactoring of repeated code.

The patches were cut against Martin's 6.6/scsi-queue tree.

Link: https://lore.kernel.org/r/20230712180522.112722-1-justintee8345@gmail.com


Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parents 109a2a48 71fe5dda
Loading
Loading
Loading
Loading
+20 −0
Original line number Diff line number Diff line
@@ -872,6 +872,7 @@ enum lpfc_irq_chann_mode {
/* Bit indices used with the atomic bitops (test_bit/set_bit) on
 * phba->bit_flags -- see e.g. the test_bit(MBX_TMO_ERR, ...) call in
 * lpfc_els_flush_cmd().
 */
enum lpfc_hba_bit_flags {
	FABRIC_COMANDS_BLOCKED,	/* sic -- long-standing spelling, kept as-is */
	HBA_PCI_ERR,
	MBX_TMO_ERR,		/* presumably set on mailbox command timeout --
				 * confirm against the code that sets it
				 */
};

struct lpfc_hba {
@@ -1708,6 +1709,25 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)

	return cpu_it;
}
/**
 * lpfc_next_present_cpu - Finds next present CPU after n
 * @n: the cpu prior to search
 *
 * Note: If no next present cpu, then fallback to first present cpu.
 *
 * Return: the next present CPU after @n, wrapping around to the first
 * present CPU when @n is at (or past) the end of the present mask.
 **/
static inline unsigned int lpfc_next_present_cpu(int n)
{
	unsigned int next = cpumask_next(n, cpu_present_mask);

	/* Wrap to the first present CPU when the search ran off the end */
	return (next < nr_cpu_ids) ? next :
		cpumask_first(cpu_present_mask);
}

/**
 * lpfc_sli4_mod_hba_eq_delay - update EQ delay
 * @phba: Pointer to HBA context object.
+101 −35
Original line number Diff line number Diff line
@@ -2127,11 +2127,12 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
		  uint32_t *mrpi, uint32_t *arpi,
		  uint32_t *mvpi, uint32_t *avpi)
{
	struct lpfc_mbx_read_config *rd_config;
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *pmb;
	int rc = 0;
	uint32_t max_vpi;
	struct lpfc_sli4_hba *sli4_hba;
	struct lpfc_max_cfg_param *max_cfg_param;
	u16 rsrc_ext_cnt, rsrc_ext_size, max_vpi;

	/*
	 * prevent udev from issuing mailbox commands until the port is
@@ -2167,31 +2168,65 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		rd_config = &pmboxq->u.mqe.un.rd_config;
		sli4_hba = &phba->sli4_hba;
		max_cfg_param = &sli4_hba->max_cfg_param;

		/* Normally, extents are not used */
		if (!phba->sli4_hba.extents_in_use) {
			if (mrpi)
			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		if (arpi)
			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
					phba->sli4_hba.max_cfg_param.rpi_used;
				*mrpi = max_cfg_param->max_rpi;
			if (mxri)
			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		if (axri)
			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
					phba->sli4_hba.max_cfg_param.xri_used;
				*mxri = max_cfg_param->max_xri;
			if (mvpi) {
				max_vpi = max_cfg_param->max_vpi;

		/* Account for differences with SLI-3.  Get vpi count from
		 * mailbox data and subtract one for max vpi value.
		 */
		max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
			(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
				/* Limit the max we support */
				if (max_vpi > LPFC_MAX_VPI)
					max_vpi = LPFC_MAX_VPI;
				*mvpi = max_vpi;
			}
		} else { /* Extents in use */
			if (mrpi) {
				if (lpfc_sli4_get_avail_extnt_rsrc(phba,
								   LPFC_RSC_TYPE_FCOE_RPI,
								   &rsrc_ext_cnt,
								   &rsrc_ext_size)) {
					rc = 0;
					goto free_pmboxq;
				}

				*mrpi = rsrc_ext_cnt * rsrc_ext_size;
			}

			if (mxri) {
				if (lpfc_sli4_get_avail_extnt_rsrc(phba,
								   LPFC_RSC_TYPE_FCOE_XRI,
								   &rsrc_ext_cnt,
								   &rsrc_ext_size)) {
					rc = 0;
					goto free_pmboxq;
				}

				*mxri = rsrc_ext_cnt * rsrc_ext_size;
			}

			if (mvpi) {
				if (lpfc_sli4_get_avail_extnt_rsrc(phba,
								   LPFC_RSC_TYPE_FCOE_VPI,
								   &rsrc_ext_cnt,
								   &rsrc_ext_size)) {
					rc = 0;
					goto free_pmboxq;
				}

				max_vpi = rsrc_ext_cnt * rsrc_ext_size;

				/* Limit the max we support */
				if (max_vpi > LPFC_MAX_VPI)
					max_vpi = LPFC_MAX_VPI;
		if (mvpi)
				*mvpi = max_vpi;
		if (avpi)
			*avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
			}
		}
	} else {
		if (mrpi)
			*mrpi = pmb->un.varRdConfig.max_rpi;
@@ -2212,8 +2247,12 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
		}
	}

	/* Success */
	rc = 1;

free_pmboxq:
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return 1;
	return rc;
}

/**
@@ -2265,10 +2304,19 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
	struct Scsi_Host  *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t cnt, acnt;
	struct lpfc_sli4_hba *sli4_hba;
	struct lpfc_max_cfg_param *max_cfg_param;
	u32 cnt = 0, acnt = 0;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		sli4_hba = &phba->sli4_hba;
		max_cfg_param = &sli4_hba->max_cfg_param;
		return scnprintf(buf, PAGE_SIZE, "%d\n",
				 max_cfg_param->rpi_used);
	} else {
		if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
			return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
	}
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

@@ -2321,10 +2369,19 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
	struct Scsi_Host  *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t cnt, acnt;
	struct lpfc_sli4_hba *sli4_hba;
	struct lpfc_max_cfg_param *max_cfg_param;
	u32 cnt = 0, acnt = 0;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		sli4_hba = &phba->sli4_hba;
		max_cfg_param = &sli4_hba->max_cfg_param;
		return scnprintf(buf, PAGE_SIZE, "%d\n",
				 max_cfg_param->xri_used);
	} else {
		if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
			return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
	}
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

@@ -2377,10 +2434,19 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
	struct Scsi_Host  *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t cnt, acnt;
	struct lpfc_sli4_hba *sli4_hba;
	struct lpfc_max_cfg_param *max_cfg_param;
	u32 cnt = 0, acnt = 0;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		sli4_hba = &phba->sli4_hba;
		max_cfg_param = &sli4_hba->max_cfg_param;
		return scnprintf(buf, PAGE_SIZE, "%d\n",
				 max_cfg_param->vpi_used);
	} else {
		if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
			return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
	}
	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}

+14 −6
Original line number Diff line number Diff line
@@ -1557,7 +1557,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
			if (fc4_data_1 &  LPFC_FC4_TYPE_BITMASK)
				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "3064 Setting ndlp x%px, DID x%06x "
					 "with FC4 x%08x, Data: x%08x x%08x "
					 "%d\n",
@@ -1568,14 +1569,21 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
			    ndlp->nlp_fc4_type) {
				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;

				/* This is a fabric topology so if discovery
				 * started with an unsolicited PLOGI, don't
				 * send a PRLI.  Targets don't issue PLOGI or
				 * PRLI when acting as a target. Likely this is
				 * an initiator function.
				 */
				if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
					lpfc_nlp_set_state(vport, ndlp,
							   NLP_STE_PRLI_ISSUE);
					lpfc_issue_els_prli(vport, ndlp, 0);
				}
			} else if (!ndlp->nlp_fc4_type) {
				/* If fc4 type is still unknown, then LOGO */
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_DISCOVERY,
						 LOG_DISCOVERY | LOG_NODE,
						 "6443 Sending LOGO ndlp x%px,"
						 "DID x%06x with fc4_type: "
						 "x%08x, state: %d\n",
+42 −16
Original line number Diff line number Diff line
@@ -1041,7 +1041,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
			lpfc_nlp_put(ndlp);

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x refcnt %d\n",
				 ulp_status, ulp_word4, cmdiocb->sli4_xritag,
@@ -1091,7 +1091,6 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
@@ -2377,10 +2376,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		/* PRLI failed */
		lpfc_printf_vlog(vport, mode, loglevel,
				 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
				 "data: x%x x%x\n",
				 "data: x%x x%x x%x\n",
				 ndlp->nlp_DID, ulp_status,
				 ulp_word4, ndlp->nlp_state,
				 ndlp->fc4_prli_sent);
				 ndlp->fc4_prli_sent, ndlp->nlp_flag);

		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
		if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
@@ -2391,10 +2390,12 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		 * mismatch typically caused by an RSCN. Skip any
		 * processing to allow recovery.
		 */
		if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
		    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) {
		if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
		     ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) ||
		    (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		     ndlp->nlp_flag & NLP_DELAY_TMO)) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
					 "2784 PRLI cmpl: state mismatch "
					 "2784 PRLI cmpl: Allow Node recovery "
					 "DID x%06x nstate x%x nflag x%x\n",
					 ndlp->nlp_DID, ndlp->nlp_state,
					 ndlp->nlp_flag);
@@ -6166,11 +6167,25 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
			npr->TaskRetryIdReq = 1;
		}
		npr->acceptRspCode = PRLI_REQ_EXECUTED;

		/* Set image pair for complementary pairs only. */
		if (ndlp->nlp_type & NLP_FCP_TARGET)
			npr->estabImagePair = 1;
		else
			npr->estabImagePair = 0;
		npr->readXferRdyDis = 1;
		npr->ConfmComplAllowed = 1;
		npr->prliType = PRLI_FCP_TYPE;
		npr->initiatorFunc = 1;

		/* Xmit PRLI ACC response tag <ulpIoTag> */
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
				 "6014 FCP issue PRLI ACC imgpair %d "
				 "retry %d task %d\n",
				 npr->estabImagePair,
				 npr->Retry, npr->TaskRetryIdReq);

	} else if (prli_fc4_req == PRLI_NVME_TYPE) {
		/* Respond with an NVME PRLI Type */
		npr_nvme = (struct lpfc_nvme_prli *) pcmd;
@@ -9588,11 +9603,13 @@ void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
	LIST_HEAD(abort_list);
	LIST_HEAD(cancel_list);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	u32 ulp_command;
	unsigned long iflags = 0;
	bool mbx_tmo_err;

	lpfc_fabric_abort_vport(vport);

@@ -9614,15 +9631,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);

	mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
	/* First we need to issue aborts to outstanding cmds on txcmpl */
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
		if (piocb->cmd_flag & LPFC_IO_LIBDFC)
		if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
			continue;

		if (piocb->vport != vport)
			continue;

		if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
		if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
			continue;

		/* On the ELS ring we can have ELS_REQUESTs or
@@ -9641,8 +9659,8 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
			 */
			if (phba->link_state == LPFC_LINK_DOWN)
				piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
		}
		if (ulp_command == CMD_GEN_REQUEST64_CR)
		} else if (ulp_command == CMD_GEN_REQUEST64_CR ||
			   mbx_tmo_err)
			list_add_tail(&piocb->dlist, &abort_list);
	}

@@ -9654,9 +9672,17 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_del_init(&piocb->dlist);
		if (mbx_tmo_err)
			list_move_tail(&piocb->list, &cancel_list);
		else
			lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);

		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	if (!list_empty(&cancel_list))
		lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
	else
		/* Make sure HBA is alive */
		lpfc_issue_hb_tmo(phba);

+52 −25
Original line number Diff line number Diff line
@@ -169,29 +169,44 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
			 "load_flag x%x refcnt %d state %d xpt x%x\n",
			 "load_flag x%x refcnt %u state %d xpt x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
			 vport->load_flag, kref_read(&ndlp->kref),
			 ndlp->nlp_state, ndlp->fc4_xpt_flags);

	/* Don't schedule a worker thread event if the vport is going down.
	 * The teardown process cleans up the node via lpfc_drop_node.
	 */
	/* Don't schedule a worker thread event if the vport is going down. */
	if (vport->load_flag & FC_UNLOADING) {
		((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->rport = NULL;

		/* The scsi_transport is done with the rport so lpfc cannot
		 * call to unregister. Remove the scsi transport reference
		 * and clean up the SCSI transport node details.
		 */
		if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
			ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
		/* clear the NLP_XPT_REGD if the node is not registered
		 * with nvme-fc

			/* NVME transport-registered rports need the
			 * NLP_XPT_REGD flag to complete an unregister.
			 */
		if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
			if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
				ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_nlp_put(ndlp);
			spin_lock_irqsave(&ndlp->lock, iflags);
		}

		/* Remove the node reference from remote_port_add now.
		 * The driver will not call remote_port_delete.
		/* Only 1 thread can drop the initial node reference.  If
		 * another thread has set NLP_DROPPED, this thread is done.
		 */
		if (!(ndlp->nlp_flag & NLP_DROPPED)) {
			ndlp->nlp_flag |= NLP_DROPPED;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_nlp_put(ndlp);
			spin_lock_irqsave(&ndlp->lock, iflags);
		}

		spin_unlock_irqrestore(&ndlp->lock, iflags);
		return;
	}

@@ -4686,7 +4701,8 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
	spin_lock_irqsave(&ndlp->lock, iflags);
	if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
				 "0999 %s Not regd: ndlp x%px rport x%px DID "
				 "x%x FLG x%x XPT x%x\n",
				  __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
@@ -4702,9 +4718,10 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	} else if (!ndlp->rport) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_ELS | LOG_NODE | LOG_DISCOVERY,
				 "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
				 " XPT x%x refcnt %d\n",
				 " XPT x%x refcnt %u\n",
				 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->fc4_xpt_flags,
				 kref_read(&ndlp->kref));
@@ -4954,23 +4971,30 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp not already in the UNUSED state before we proceed.
	 * be used when lpfc wants to remove the "last" lpfc_nlp_put() to
	 * release the ndlp from the vport when conditions are correct.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	ndlp->nlp_flag |= NLP_DROPPED;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_cleanup_vports_rrqs(vport, ndlp);
		lpfc_unreg_rpi(vport, ndlp);
	}

	/* NLP_DROPPED means another thread already removed the initial
	 * reference from lpfc_nlp_init.  If set, don't drop it again and
	 * introduce an imbalance.
	 */
	spin_lock_irq(&ndlp->lock);
	if (!(ndlp->nlp_flag & NLP_DROPPED)) {
		ndlp->nlp_flag |= NLP_DROPPED;
		spin_unlock_irq(&ndlp->lock);
		lpfc_nlp_put(ndlp);
		return;
	}
	spin_unlock_irq(&ndlp->lock);
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
@@ -5757,8 +5781,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
			     (NLP_FCP_TARGET | NLP_NVME_TARGET)))
				return NULL;

			if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
			    ndlp->nlp_state < NLP_STE_NPR_NODE) {
				lpfc_disc_state_machine(vport, ndlp, NULL,
							NLP_EVT_DEVICE_RECOVERY);
			}

			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
Loading