Commit e8cfe8fa authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull SCSI fixes from James Bottomley:
 "Seven fixes, all in drivers (qla2xxx, mpt3sas, qedi, target,
  ibmvscsi).

  The most serious are the target pscsi oom and the qla2xxx revert which
  can otherwise cause a use after free"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: target: pscsi: Clean up after failure in pscsi_map_sg()
  scsi: target: pscsi: Avoid OOM in pscsi_map_sg()
  scsi: mpt3sas: Fix error return code of mpt3sas_base_attach()
  scsi: qedi: Fix error return code of qedi_alloc_global_queues()
  scsi: Revert "qla2xxx: Make sure that aborted commands are freed"
  scsi: ibmvfc: Make ibmvfc_wait_for_ops() MQ aware
  scsi: ibmvfc: Fix potential race in ibmvfc_wait_for_ops()
parents 0f4498ce 36fa766f
Loading
Loading
Loading
Loading
+54 −13
Original line number Diff line number Diff line
@@ -2371,6 +2371,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
	return 0;
}

/**
 * ibmvfc_event_is_free - Check if event is free or not
 * @evt:	ibmvfc event struct
 *
 * Returns:
 *	true / false
 **/
static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
{
	struct ibmvfc_event *loop_evt;

	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
		if (loop_evt == evt)
			return true;

	return false;
}

/**
 * ibmvfc_wait_for_ops - Wait for ops to complete
 * @vhost:	ibmvfc host struct
@@ -2385,35 +2403,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
{
	struct ibmvfc_event *evt;
	DECLARE_COMPLETION_ONSTACK(comp);
	int wait;
	int wait, i, q_index, q_size;
	unsigned long flags;
	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
	struct ibmvfc_queue *queues;

	ENTER;
	if (vhost->mq_enabled && vhost->using_channels) {
		queues = vhost->scsi_scrqs.scrqs;
		q_size = vhost->scsi_scrqs.active_queues;
	} else {
		queues = &vhost->crq;
		q_size = 1;
	}

	do {
		wait = 0;
		spin_lock_irqsave(&vhost->crq.l_lock, flags);
		list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
		spin_lock_irqsave(vhost->host->host_lock, flags);
		for (q_index = 0; q_index < q_size; q_index++) {
			spin_lock(&queues[q_index].l_lock);
			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
				evt = &queues[q_index].evt_pool.events[i];
				if (!ibmvfc_event_is_free(evt)) {
					if (match(evt, device)) {
						evt->eh_comp = &comp;
						wait++;
					}
				}
		spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
			}
			spin_unlock(&queues[q_index].l_lock);
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;
				spin_lock_irqsave(&vhost->crq.l_lock, flags);
				list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
				spin_lock_irqsave(vhost->host->host_lock, flags);
				for (q_index = 0; q_index < q_size; q_index++) {
					spin_lock(&queues[q_index].l_lock);
					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
						evt = &queues[q_index].evt_pool.events[i];
						if (!ibmvfc_event_is_free(evt)) {
							if (match(evt, device)) {
								evt->eh_comp = NULL;
								wait++;
							}
						}
				spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
					}
					spin_unlock(&queues[q_index].l_lock);
				}
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (wait)
					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
+6 −2
Original line number Diff line number Diff line
@@ -7806,14 +7806,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add)
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress)
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

+1 −0
Original line number Diff line number Diff line
@@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
		if (!qedi->global_queues[i]) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Unable to allocation global queue %d.\n", i);
			status = -ENOMEM;
			goto mem_alloc_failure;
		}

+5 −8
Original line number Diff line number Diff line
@@ -3222,8 +3222,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
	    (cmd->sess && cmd->sess->deleted)) {
		cmd->state = QLA_TGT_STATE_PROCESSED;
		res = 0;
		goto free;
		return 0;
	}

	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@@ -3234,8 +3233,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0))
		goto free;
	if (unlikely(res != 0)) {
		return res;
	}

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

@@ -3255,8 +3255,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
			vha->flags.online, qla2x00_reset_active(vha),
			cmd->reset_count, qpair->chip_reset);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		res = 0;
		goto free;
		return 0;
	}

	/* Does F/W have an IOCBs for this request */
@@ -3359,8 +3358,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	qlt_unmap_sg(vha, cmd);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

free:
	vha->hw->tgt.tgt_ops->free_cmd(cmd);
	return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
+0 −4
Original line number Diff line number Diff line
@@ -644,7 +644,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	struct scsi_qla_host *vha = cmd->vha;

	if (cmd->aborted) {
		/* Cmd can loop during Q-full.  tcm_qla2xxx_aborted_task
@@ -657,7 +656,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
			cmd->se_cmd.transport_state,
			cmd->se_cmd.t_state,
			cmd->se_cmd.se_cmd_flags);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		return 0;
	}

@@ -685,7 +683,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	struct scsi_qla_host *vha = cmd->vha;
	int xmit_type = QLA_TGT_XMIT_STATUS;

	if (cmd->aborted) {
@@ -699,7 +696,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
		    cmd, kref_read(&cmd->se_cmd.cmd_kref),
		    cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
		    cmd->se_cmd.se_cmd_flags);
		vha->hw->tgt.tgt_ops->free_cmd(cmd);
		return 0;
	}
	cmd->bufflen = se_cmd->data_length;
Loading