Commit 31750fbd authored by Tyrel Datwyler's avatar Tyrel Datwyler Committed by Martin K. Petersen
Browse files

scsi: ibmvfc: Send commands down HW Sub-CRQ when channelized

When the client has negotiated the use of channels, all vfcFrames are
required to go down a Sub-CRQ channel or it is a protocol violation. If
the adapter state is channelized, submit vfcFrames to the appropriate
Sub-CRQ via the h_send_sub_crq() helper.

Link: https://lore.kernel.org/r/20210114203148.246656-16-tyreld@linux.ibm.com


Reviewed-by: default avatarBrian King <brking@linux.vnet.ibm.com>
Signed-off-by: default avatarTyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
parent cb72477b
Loading
Loading
Loading
Loading
+33 −6
Original line number Diff line number Diff line
@@ -704,6 +704,15 @@ static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

/**
 * ibmvfc_send_sub_crq - Send a message down a hardware Sub-CRQ channel
 * @vhost:	ibmvfc host struct
 * @cookie:	cookie identifying the target Sub-CRQ to the hypervisor
 * @word1:	first 64 bits of the message payload
 * @word2:	second 64 bits of the message payload
 * @word3:	third 64 bits of the message payload
 * @word4:	fourth 64 bits of the message payload
 *
 * Thin wrapper around the H_SEND_SUB_CRQ hypervisor call; counterpart of
 * ibmvfc_send_crq() for the channelized (Sub-CRQ) path.
 *
 * Return:	hypervisor call return code from plpar_hcall_norets()
 *		(presumably H_SUCCESS on success — per PAPR hcall convention)
 **/
static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
			       u64 word2, u64 word3, u64 word4)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);

	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
				  word1, word2, word3, word4);
}

/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost:	ibmvfc host struct
@@ -1623,8 +1632,17 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,

	mb();

	if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
				  be64_to_cpu(crq_as_u64[1])))) {
	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
		rc = ibmvfc_send_sub_crq(vhost,
					 evt->queue->vios_cookie,
					 be64_to_cpu(crq_as_u64[0]),
					 be64_to_cpu(crq_as_u64[1]),
					 0, 0);
	else
		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
				     be64_to_cpu(crq_as_u64[1]));

	if (rc) {
		list_del(&evt->queue_list);
		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
		del_timer(&evt->timer);
@@ -1842,6 +1860,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
	struct ibmvfc_event *evt;
	u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
	u16 scsi_channel;
	int rc;

	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
@@ -1852,7 +1871,13 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
	}

	cmnd->result = (DID_OK << 16);
	if (vhost->using_channels) {
		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
	} else
		evt = ibmvfc_get_event(&vhost->crq);

	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
	evt->cmnd = cmnd;

@@ -1868,8 +1893,6 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
	}

	vfc_cmd->correlation = cpu_to_be64(evt);
	if (vhost->using_channels)
		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;

	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
		return ibmvfc_send_event(evt, vhost, 0);
@@ -2200,7 +2223,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)

	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		if (vhost->using_channels)
			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
		else
			evt = ibmvfc_get_event(&vhost->crq);

		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
		iu = ibmvfc_get_fcp_iu(vhost, tmf);