Commit 07db0563 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull more SCSI updates from James Bottomley:
 "This is a set of minor fixes in various drivers (qla2xxx, ufs,
  scsi_debug, lpfc) one doc fix and a fairly large update to the fnic
  driver to remove the open coded iteration functions in favour of the
  scsi provided ones"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: fnic: Use scsi_host_busy_iter() to traverse commands
  scsi: fnic: Kill 'exclude_id' argument to fnic_cleanup_io()
  scsi: scsi_debug: Fix cmd_per_lun, set to max_queue
  scsi: ufs: core: Narrow down fast path in system suspend path
  scsi: ufs: core: Cancel rpm_dev_flush_recheck_work during system suspend
  scsi: ufs: core: Do not put UFS power into LPM if link is broken
  scsi: qla2xxx: Prevent PRLI in target mode
  scsi: qla2xxx: Add marginal path handling support
  scsi: target: tcmu: Return from tcmu_handle_completions() if cmd_id not found
  scsi: ufs: core: Fix a typo in ufs-sysfs.c
  scsi: lpfc: Fix bad memory access during VPD DUMP mailbox command
  scsi: lpfc: Fix DMA virtual address ptr assignment in bsg
  scsi: lpfc: Fix illegal memory access on Abort IOCBs
  scsi: blk-mq: Fix build warning when making htmldocs
parents 0f979d81 35ffbb60
Loading
Loading
Loading
Loading
+377 −451
Original line number Diff line number Diff line
@@ -102,7 +102,7 @@ static const char *fnic_fcpio_status_to_str(unsigned int status)
	return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
static void fnic_cleanup_io(struct fnic *fnic);

static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
@@ -638,7 +638,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);
	fnic_cleanup_io(fnic);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
@@ -1361,27 +1361,18 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
	return wq_work_done;
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
				 bool reserved)
{
	int i;
	struct fnic *fnic = data;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
		if (i == exclude_id)
			continue;

		io_lock = fnic_io_lock_tag(fnic, i);
	io_lock = fnic_io_lock_tag(fnic, sc->request->tag);
	spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
@@ -1396,14 +1387,14 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
		else if (io_req && io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	}
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		goto cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;
@@ -1418,11 +1409,11 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
	sc->result = DID_TRANSPORT_DISRUPTED << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
			      __func__, sc->request->tag, sc,
			      (jiffies - start_time));
		      "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
		      sc->request->tag, sc, (jiffies - start_time));

	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
@@ -1437,7 +1428,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
				     sc->request->tag, sc);

		FNIC_TRACE(fnic_cleanup_io,
				  sc->device->host->host_no, i, sc,
			   sc->device->host->host_no, sc->request->tag, sc,
			   jiffies_to_msecs(jiffies - start_time),
			   0, ((u64)sc->cmnd[0] << 32 |
			       (u64)sc->cmnd[2] << 24 |
@@ -1447,7 +1438,13 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)

		sc->scsi_done(sc);
	}
	return true;
}

/*
 * Clean up every outstanding I/O request on this host by walking the
 * busy commands with the midlayer iterator; per-command work is done
 * in fnic_cleanup_io_iter().
 */
static void fnic_cleanup_io(struct fnic *fnic)
{
	struct Scsi_Host *host = fnic->lport->host;

	scsi_host_busy_iter(host, fnic_cleanup_io_iter, fnic);
}

void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
@@ -1558,43 +1555,34 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
	return 0;
}

static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
/*
 * Iterator context handed to fnic_rport_abort_io_iter() through
 * scsi_host_busy_iter() by fnic_rport_exch_reset().
 */
struct fnic_rport_abort_io_iter_data {
	struct fnic *fnic;	/* owning adapter instance */
	u32 port_id;		/* remote port whose pending I/Os are aborted */
	int term_cnt;		/* out: terminates issued, fed to max_terminates */
};

static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
				     bool reserved)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_rport_abort_io_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	int abt_tag = sc->request->tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
	io_lock = fnic_io_lock_tag(fnic, abt_tag);
	spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

	io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || io_req->port_id != port_id) {
	if (!io_req || io_req->port_id != iter_data->port_id) {
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	}

	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
@@ -1603,7 +1591,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
			"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
			sc);
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	}

	/*
@@ -1612,7 +1600,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	}
	if (io_req->abts_done) {
		shost_printk(KERN_ERR, fnic->lport->host,
@@ -1626,19 +1614,17 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
			     "rport_exch_reset "
			     "IO not yet issued %p tag 0x%x flags "
			     "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
			     sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc));
	}
	old_ioreq_state = CMD_STATE(sc);
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
	if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
		atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst sc 0x%p\n",
			sc);
		abt_tag |= FNIC_TAG_DEV_RST;
	}

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
	BUG_ON(io_req->abts_done);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -1670,31 +1656,40 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
			CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
		spin_unlock_irqrestore(io_lock, flags);
		atomic64_inc(&term_stats->terminates);
			term_cnt++;
		iter_data->term_cnt++;
	}
	return true;
}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);

/*
 * Abort all pending I/O requests that belong to the remote port
 * identified by @port_id, then record the high-water mark of
 * terminates issued in one pass.
 */
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct fnic_rport_abort_io_iter_data iter_data;

	iter_data.fnic = fnic;
	iter_data.port_id = port_id;
	iter_data.term_cnt = 0;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	/* Nothing to do while the adapter is being torn down. */
	if (fnic->in_remove)
		return;

	scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
			    &iter_data);

	/* Update the per-adapter maximum of terminates issued in one sweep. */
	if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
}

void fnic_terminate_rport_io(struct fc_rport *rport)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct fc_rport_libfc_priv *rdata;
	struct fc_lport *lport;
	struct fnic *fnic;
	struct fc_rport *cmd_rport;
	struct reset_stats *reset_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;

	if (!rport) {
		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
@@ -1722,108 +1717,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
	if (fnic->in_remove)
		return;

	reset_stats = &fnic->fnic_stats.reset_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		cmd_rport = starget_to_rport(scsi_target(sc->device));
		if (rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_terminate_rport_io: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}
		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "fnic_terminate_rport_io "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "fnic_terminate_rport_io: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);

	fnic_rport_exch_reset(fnic, rport->port_id);
}

/*
@@ -2118,46 +2012,38 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
	return ret;
}

/*
 * Clean up any pending aborts on the lun
 * For each outstanding IO on this lun, whose abort is not completed by fw,
 * issue a local abort. Wait for abort to complete. Return 0 if all commands
 * successfully aborted, 1 otherwise
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc,
					 bool new_sc)
/*
 * Iterator context shared by fnic_pending_aborts_iter() and
 * fnic_abts_pending_iter(), passed via scsi_host_busy_iter().
 */
struct fnic_pending_aborts_iter_data {
	struct fnic *fnic;		/* owning adapter instance */
	struct scsi_cmnd *lr_sc;	/* LUN-reset command to skip, or NULL */
	struct scsi_device *lun_dev;	/* restrict walk to this LUN; NULL = all */
	int ret;			/* out: SUCCESS/FAILED or pending flag */
};

static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
				     void *data, bool reserved)
{
	int tag, abt_tag;
	struct fnic_pending_aborts_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	struct scsi_device *lun_dev = iter_data->lun_dev;
	int abt_tag = sc->request->tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct scsi_device *lun_dev = lr_sc->device;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd if issued using new SC
		 * or cmds that do not belong to this lun
		 */
		if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
	if (sc == iter_data->lr_sc || sc->device != lun_dev)
		return true;
	if (reserved)
		return true;

	io_lock = fnic_io_lock_tag(fnic, abt_tag);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	}

	/*
@@ -2170,7 +2056,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	}
	if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
	    (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
@@ -2178,7 +2064,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
			      "%s dev rst not pending sc 0x%p\n", __func__,
			      sc);
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	}

	if (io_req->abts_done)
@@ -2197,7 +2083,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,

	BUG_ON(io_req->abts_done);

		abt_tag = tag;
	if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
		abt_tag |= FNIC_TAG_DEV_RST;
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
@@ -2221,8 +2106,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = old_ioreq_state;
		spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		iter_data->ret = FAILED;
		return false;
	} else {
		spin_lock_irqsave(io_lock, flags);
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
@@ -2231,8 +2116,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
	}
	CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;

		wait_for_completion_timeout(&tm_done,
					    msecs_to_jiffies
	wait_for_completion_timeout(&tm_done, msecs_to_jiffies
				    (fnic->config.ed_tov));

	/* Recheck cmd state to check if it is now aborted */
@@ -2241,7 +2125,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
			continue;
		return true;
	}

	io_req->abts_done = NULL;
@@ -2250,18 +2134,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
			ret = 1;
			goto clean_pending_aborts_end;
		iter_data->ret = FAILED;
		return false;
	}
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

	/* original sc used for lr is handled by dev reset code */
		if (sc != lr_sc)
	if (sc != iter_data->lr_sc)
		CMD_SP(sc) = NULL;
	spin_unlock_irqrestore(io_lock, flags);

	/* original sc used for lr is handled by dev reset code */
		if (sc != lr_sc) {
	if (sc != iter_data->lr_sc) {
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}
@@ -2275,8 +2159,36 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
		sc->result = DID_RESET << 16;
		sc->scsi_done(sc);
	}
	return true;
}

/*
 * Clean up any pending aborts on the lun
 * For each outstanding IO on this lun, whose abort is not completed by fw,
 * issue a local abort. Wait for abort to complete. Return 0 if all commands
 * successfully aborted, 1 otherwise
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc,
				     bool new_sc)

{
	int ret = SUCCESS;
	struct fnic_pending_aborts_iter_data iter_data = {
		.fnic = fnic,
		.lun_dev = lr_sc->device,
		.ret = SUCCESS,
	};

	if (new_sc)
		iter_data.lr_sc = lr_sc;

	scsi_host_busy_iter(fnic->lport->host,
			    fnic_pending_aborts_iter, &iter_data);
	if (iter_data.ret == FAILED) {
		ret = iter_data.ret;
		goto clean_pending_aborts_end;
	}
	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));

	/* walk again to check, if IOs are still pending in fw */
@@ -2775,44 +2687,32 @@ void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)

}

/*
 * fnic_is_abts_pending() is a helper function that
 * walks through tag map to check if there is any IOs pending,if there is one,
 * then it returns 1 (true), otherwise 0 (false)
 * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
 * otherwise, it checks for all IOs.
 */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
				   bool reserved)
{
	int tag;
	struct fnic_pending_aborts_iter_data *iter_data = data;
	struct fnic *fnic = iter_data->fnic;
	int cmd_state;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_device *lun_dev = NULL;

	if (lr_sc)
		lun_dev = lr_sc->device;

	/* walk again to check, if IOs are still pending in fw */
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
	/*
	 * ignore this lun reset cmd or cmds that do not belong to
	 * this lun
	 */
		if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
			continue;
	if (iter_data->lr_sc && sc == iter_data->lr_sc)
		return true;
	if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
		return true;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
			continue;
		return true;
	}

	/*
@@ -2822,11 +2722,37 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
	FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
		      "Found IO in %s on lun\n",
		      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			ret = 1;
	cmd_state = CMD_STATE(sc);
	spin_unlock_irqrestore(io_lock, flags);
	if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
		iter_data->ret = 1;

	return iter_data->ret ? false : true;
}

	return ret;
/*
 * fnic_is_abts_pending() is a helper function that
 * walks through tag map to check if there is any IOs pending,if there is one,
 * then it returns 1 (true), otherwise 0 (false)
 * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
 * otherwise, it checks for all IOs.
 */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
	struct fnic_pending_aborts_iter_data iter_data = {
		.fnic = fnic,
		.lun_dev = NULL,
		.ret = 0,
	};

	if (lr_sc) {
		iter_data.lun_dev = lr_sc->device;
		iter_data.lr_sc = lr_sc;
	}

	/* walk again to check, if IOs are still pending in fw */
	scsi_host_busy_iter(fnic->lport->host,
			    fnic_abts_pending_iter, &iter_data);

	return iter_data.ret;
}
+1 −1
Original line number Diff line number Diff line
@@ -934,7 +934,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1;
	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;

+6 −6
Original line number Diff line number Diff line
@@ -254,13 +254,13 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
		if (mb->un.varDmp.word_cnt == 0)
			break;

		i =  mb->un.varDmp.word_cnt * sizeof(uint32_t);
		if (offset + i >  DMP_VPD_SIZE)
			i =  DMP_VPD_SIZE - offset;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data  + offset, i);
		offset += i;
	} while (offset < DMP_VPD_SIZE);
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

+17 −9
Original line number Diff line number Diff line
@@ -11804,13 +11804,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_io_buf *lpfc_cmd;
	IOCB_t *icmd = NULL;
	int rc = 1;
	if (!iocbq || iocbq->vport != vport)
		return rc;
	if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
	    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
	    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
	      iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
		return rc;
	icmd = &iocbq->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN)
		return rc;
	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
@@ -19770,7 +19777,7 @@ lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int i, rc;
	int rc;
	if (!rgn23_data)
		return 0;
@@ -19801,13 +19808,14 @@ lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
		if (mb->un.varDmp.word_cnt == 0)
			break;
		i =  mb->un.varDmp.word_cnt * sizeof(uint32_t);
		if (offset + i >  DMP_RGN23_SIZE)
			i =  DMP_RGN23_SIZE - offset;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data  + offset, i);
		offset += i;
	} while (offset < DMP_RGN23_SIZE);
				       rgn23_data + offset,
				       mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
+3 −0
Original line number Diff line number Diff line
@@ -1195,6 +1195,9 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e;

	if (vha->host->active_mode == MODE_TARGET)
		return QLA_FUNCTION_FAILED;

	e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
	if (!e)
		return QLA_FUNCTION_FAILED;
Loading