Commit 78ca5588 authored by Linus Torvalds
Browse files
Pull SCSI fixes from James Bottomley:
 "Eight fixes, all in drivers (ufs, scsi_debug, storvsc, iscsi, ibmvfc).

  Apart from the ufs command clearing updates, these are mostly minor
  and obvious fixes"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: ibmvfc: Store vhost pointer during subcrq allocation
  scsi: ibmvfc: Allocate/free queue resource only during probe/remove
  scsi: storvsc: Correct reporting of Hyper-V I/O size limits
  scsi: ufs: Fix a race between the interrupt handler and the reset handler
  scsi: ufs: Support clearing multiple commands at once
  scsi: ufs: Simplify ufshcd_clear_cmd()
  scsi: iscsi: Exclude zero from the endpoint ID range
  scsi: scsi_debug: Fix zone transition to full condition
parents c5b3a094 aeaadcde
Loading
Loading
Loading
Loading
+64 −18
Original line number Diff line number Diff line
@@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);
static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);

static const char *unknown_error = "unknown error";

@@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	unsigned long flags;

	ibmvfc_release_sub_crqs(vhost);
	ibmvfc_dereg_sub_crqs(vhost);

	/* Re-enable the CRQ */
	do {
@@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_init_sub_crqs(vhost);
	ibmvfc_reg_sub_crqs(vhost);

	return rc;
}
@@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_release_sub_crqs(vhost);
	ibmvfc_dereg_sub_crqs(vhost);

	/* Close the CRQ */
	do {
@@ -988,7 +988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_init_sub_crqs(vhost);
	ibmvfc_reg_sub_crqs(vhost);

	return rc;
}
@@ -5682,6 +5682,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
	queue->cur = 0;
	queue->fmt = fmt;
	queue->size = PAGE_SIZE / fmt_size;

	queue->vhost = vhost;
	return 0;
}

@@ -5757,9 +5759,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,

	ENTER;

	if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
		return -ENOMEM;

	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
			   &scrq->cookie, &scrq->hw_irq);

@@ -5790,7 +5789,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
	}

	scrq->hwq_id = index;
	scrq->vhost = vhost;

	LEAVE;
	return 0;
@@ -5800,7 +5798,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
	} while (rtas_busy_delay(rc));
reg_failed:
	ibmvfc_free_queue(vhost, scrq);
	LEAVE;
	return rc;
}
@@ -5826,12 +5823,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
	if (rc)
		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);

	ibmvfc_free_queue(vhost, scrq);
	/* Clean out the queue */
	memset(scrq->msgs.crq, 0, PAGE_SIZE);
	scrq->cur = 0;

	LEAVE;
}

/**
 * ibmvfc_reg_sub_crqs - register every sub-CRQ channel with the hypervisor
 * @vhost:	ibmvfc host adapter
 *
 * Registers one SCSI channel per hardware queue. If any registration
 * fails, the channels registered so far are torn down again and
 * do_enquiry is cleared so channel setup is not attempted.
 * No-op when MQ is disabled or the queue array was never allocated.
 */
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
{
	int q, undo;

	ENTER;
	if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
		return;

	for (q = 0; q < nr_scsi_hw_queues; q++) {
		if (!ibmvfc_register_scsi_channel(vhost, q))
			continue;
		/* Registration failed: roll back channels 0..q-1. */
		for (undo = q - 1; undo >= 0; undo--)
			ibmvfc_deregister_scsi_channel(vhost, undo);
		vhost->do_enquiry = 0;
		return;
	}

	LEAVE;
}

/**
 * ibmvfc_dereg_sub_crqs - deregister all sub-CRQ channels from the hypervisor
 * @vhost:	ibmvfc host adapter
 *
 * Deregisters each hardware queue's SCSI channel in index order.
 * No-op when MQ is disabled or the queue array was never allocated.
 */
static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
{
	int q = 0;

	ENTER;
	if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
		return;

	while (q < nr_scsi_hw_queues)
		ibmvfc_deregister_scsi_channel(vhost, q++);

	LEAVE;
}

static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *scrq;
	int i, j;

	ENTER;
@@ -5847,30 +5882,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
	}

	for (i = 0; i < nr_scsi_hw_queues; i++) {
		if (ibmvfc_register_scsi_channel(vhost, i)) {
			for (j = i; j > 0; j--)
				ibmvfc_deregister_scsi_channel(vhost, j - 1);
		scrq = &vhost->scsi_scrqs.scrqs[i];
		if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
			for (j = i; j > 0; j--) {
				scrq = &vhost->scsi_scrqs.scrqs[j - 1];
				ibmvfc_free_queue(vhost, scrq);
			}
			kfree(vhost->scsi_scrqs.scrqs);
			vhost->scsi_scrqs.scrqs = NULL;
			vhost->scsi_scrqs.active_queues = 0;
			vhost->do_enquiry = 0;
			break;
			vhost->mq_enabled = 0;
			return;
		}
	}

	ibmvfc_reg_sub_crqs(vhost);

	LEAVE;
}

static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *scrq;
	int i;

	ENTER;
	if (!vhost->scsi_scrqs.scrqs)
		return;

	for (i = 0; i < nr_scsi_hw_queues; i++)
		ibmvfc_deregister_scsi_channel(vhost, i);
	ibmvfc_dereg_sub_crqs(vhost);

	for (i = 0; i < nr_scsi_hw_queues; i++) {
		scrq = &vhost->scsi_scrqs.scrqs[i];
		ibmvfc_free_queue(vhost, scrq);
	}

	kfree(vhost->scsi_scrqs.scrqs);
	vhost->scsi_scrqs.scrqs = NULL;
+1 −1
Original line number Diff line number Diff line
@@ -789,6 +789,7 @@ struct ibmvfc_queue {
	spinlock_t _lock;
	spinlock_t *q_lock;

	struct ibmvfc_host *vhost;
	struct ibmvfc_event_pool evt_pool;
	struct list_head sent;
	struct list_head free;
@@ -797,7 +798,6 @@ struct ibmvfc_queue {
	union ibmvfc_iu cancel_rsp;

	/* Sub-CRQ fields */
	struct ibmvfc_host *vhost;
	unsigned long cookie;
	unsigned long vios_cookie;
	unsigned long hw_irq;
+20 −2
Original line number Diff line number Diff line
@@ -2826,6 +2826,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
	}
}

/*
 * Transition a zone to the FULL condition, keeping the device's
 * implicit/explicit open-zone counters consistent. A zone reaching
 * FULL must currently be implicitly or explicitly open; any other
 * condition trips a one-shot warning but the zone is still marked FULL.
 */
static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
				     struct sdeb_zone_state *zsp)
{
	if (zsp->z_cond == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
	else if (zsp->z_cond == ZC3_EXPLICIT_OPEN)
		devip->nr_exp_open--;
	else
		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
			  zsp->z_start, zsp->z_cond);

	zsp->z_cond = ZC5_FULL;
}

static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
@@ -2838,7 +2856,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;
			zbc_set_zone_full(devip, zsp);
		return;
	}

@@ -2857,7 +2875,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
			n = num;
		}
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
+6 −1
Original line number Diff line number Diff line
@@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size)
		return NULL;

	mutex_lock(&iscsi_ep_idr_mutex);
	id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);

	/*
	 * First endpoint id should be 1 to comply with user space
	 * applications (iscsid).
	 */
	id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
	if (id < 0) {
		mutex_unlock(&iscsi_ep_idr_mutex);
		printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+22 −5
Original line number Diff line number Diff line
@@ -1844,7 +1844,7 @@ static struct scsi_host_template scsi_driver = {
	.cmd_per_lun =		2048,
	.this_id =		-1,
	/* Ensure there are no gaps in presented sgls */
	.virt_boundary_mask =	PAGE_SIZE-1,
	.virt_boundary_mask =	HV_HYP_PAGE_SIZE - 1,
	.no_write_same =	1,
	.track_queue_depth =	1,
	.change_queue_depth =	storvsc_change_queue_depth,
@@ -1895,6 +1895,7 @@ static int storvsc_probe(struct hv_device *device,
	int target = 0;
	struct storvsc_device *stor_device;
	int max_sub_channels = 0;
	u32 max_xfer_bytes;

	/*
	 * We support sub-channels for storage on SCSI and FC controllers.
@@ -1968,12 +1969,28 @@ static int storvsc_probe(struct hv_device *device,
	}
	/* max cmd length */
	host->max_cmd_len = STORVSC_MAX_CMD_LEN;

	/*
	 * set the table size based on the info we got
	 * from the host.
	 * Any reasonable Hyper-V configuration should provide
	 * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE,
	 * protecting it from any weird value.
	 */
	max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
	/* max_hw_sectors_kb */
	host->max_sectors = max_xfer_bytes >> 9;
	/*
	 * There are 2 requirements for Hyper-V storvsc sgl segments,
	 * based on which the below calculation for max segments is
	 * done:
	 *
	 * 1. Except for the first and last sgl segment, all sgl segments
	 *    should be align to HV_HYP_PAGE_SIZE, that also means the
	 *    maximum number of segments in a sgl can be calculated by
	 *    dividing the total max transfer length by HV_HYP_PAGE_SIZE.
	 *
	 * 2. Except for the first and last, each entry in the SGL must
	 *    have an offset that is a multiple of HV_HYP_PAGE_SIZE.
	 */
	host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
	host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
	/*
	 * For non-IDE disks, the host supports multiple channels.
	 * Set the number of HW queues we are supporting.
Loading