Commit 1d956ad8 authored by Tyrel Datwyler, committed by Martin K. Petersen

scsi: ibmvfc: Add handlers to drain and complete Sub-CRQ responses

The logic for iterating over the Sub-CRQ responses is similar to that of
the primary CRQ. Add the necessary handlers for processing those responses.

Link: https://lore.kernel.org/r/20210114203148.246656-10-tyreld@linux.ibm.com


Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent d20046e6
+86 −0
@@ -3485,6 +3485,92 @@ static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
	return rc;
}

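/**
 * ibmvfc_handle_scrq - Service a Sub-CRQ response
 * @crq:	ibmvfc_crq struct
 * @vhost:	ibmvfc host struct
 * @evt_doneq:	list head on which to queue completed events
 **/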
static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
			       struct list_head *evt_doneq)
{
	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);

	switch (crq->valid) {
	case IBMVFC_CRQ_CMD_RSP:
		break;
	case IBMVFC_CRQ_XPORT_EVENT:
		return;
	default:
		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
		return;
	}

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
			crq->ioba);
		return;
	}

	if (unlikely(atomic_read(&evt->free))) {
		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
			crq->ioba);
		return;
	}

	spin_lock(&evt->queue->l_lock);
	list_move_tail(&evt->queue_list, evt_doneq);
	spin_unlock(&evt->queue->l_lock);
}

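/**
 * ibmvfc_next_scrq - Get the next valid entry from a Sub-CRQ, if one exists
 * @scrq:	ibmvfc Sub-CRQ struct
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if no valid entry is pending
 **/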
static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
{
	struct ibmvfc_crq *crq;

	crq = &scrq->msgs.scrq[scrq->cur].crq;
	if (crq->valid & 0x80) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
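		/* Ensure the valid bit is loaded before the rest of the entry */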
		rmb();
	} else
		crq = NULL;

	return crq;
}

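/**
 * ibmvfc_drain_sub_crq - Drain all pending responses from a Sub-CRQ
 * @scrq:	ibmvfc Sub-CRQ struct
 **/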
static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
{
	struct ibmvfc_crq *crq;
	struct ibmvfc_event *evt, *temp;
	unsigned long flags;
	int done = 0;
	LIST_HEAD(evt_doneq);

	spin_lock_irqsave(scrq->q_lock, flags);
	while (!done) {
		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
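			/* Mark the entry as consumed so the slot can be reused */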
			crq->valid = 0;
			wmb();
		}

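		/*
		 * Re-enable the irq, then check once more for a response that
		 * may have arrived in the race window before enablement.
		 */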
		ibmvfc_toggle_scrq_irq(scrq, 1);
		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
			ibmvfc_toggle_scrq_irq(scrq, 0);
			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}
	spin_unlock_irqrestore(scrq->q_lock, flags);

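	/* Complete the drained events now that the queue lock is dropped */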
	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
		del_timer(&evt->timer);
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}

/**
 * ibmvfc_init_tgt - Set the next init job step for the target
 * @tgt:		ibmvfc target struct