Commit 169f5eb2 authored by Bart Van Assche, committed by Martin K. Petersen
Browse files

scsi: ufs: Optimize SCSI command processing

Use a spinlock to protect hba->outstanding_reqs instead of using atomic
operations to update this member variable.

This patch is a performance improvement because it reduces the number of
atomic operations in the hot path (test_and_clear_bit()) and because it
reduces the lock contention on the SCSI host lock. On my test setup this
patch improves IOPS by about 1%.

Link: https://lore.kernel.org/r/20210722033439.26550-14-bvanassche@acm.org


Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Stanley Chu <stanley.chu@mediatek.com>
Cc: Can Guo <cang@codeaurora.org>
Cc: Asutosh Das <asutoshd@codeaurora.org>
Cc: Avri Altman <avri.altman@wdc.com>
Reviewed-by: Daejun Park <daejun7.park@samsung.com>
Reviewed-by: Bean Huo <beanhuo@micron.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent a024ad0d
Loading
Loading
Loading
Loading
+18 −11
Original line number Diff line number Diff line
@@ -2096,12 +2096,14 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
	ufshcd_clk_scaling_start_busy(hba);
	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
		ufshcd_start_monitor(hba, lrbp);
	spin_lock_irqsave(hba->host->host_lock, flags);

	spin_lock_irqsave(&hba->outstanding_lock, flags);
	if (hba->vops && hba->vops->setup_xfer_req)
		hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
	set_bit(task_tag, &hba->outstanding_reqs);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	/* Make sure that doorbell is committed immediately */
	wmb();
}
@@ -2890,7 +2892,9 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		 * we also need to clear the outstanding_request
		 * field in hba
		 */
		clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
		spin_lock_irqsave(&hba->outstanding_lock, flags);
		__clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
	}

	return err;
@@ -5230,8 +5234,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
	bool update_scaling = false;

	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
		if (!test_and_clear_bit(index, &hba->outstanding_reqs))
			continue;
		lrbp = &hba->lrb[index];
		lrbp->compl_time_stamp = ktime_get();
		cmd = lrbp->cmd;
@@ -5286,10 +5288,14 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
		ufshcd_reset_intr_aggr(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
	WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
		  "completed: %#lx; outstanding: %#lx\n", completed_reqs,
		  hba->outstanding_reqs);
	hba->outstanding_reqs &= ~completed_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (completed_reqs) {
		__ufshcd_transfer_req_compl(hba, completed_reqs);
@@ -9411,10 +9417,11 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;
	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;

	INIT_LIST_HEAD(&hba->clk_list_head);
	spin_lock_init(&hba->outstanding_lock);

	*hba_handle = hba;

out_error:
	return err;
+2 −0
Original line number Diff line number Diff line
@@ -720,6 +720,7 @@ struct ufs_hba_monitor {
 * @lrb: local reference block
 * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_lock: Protects @outstanding_reqs.
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
@@ -806,6 +807,7 @@ struct ufs_hba {
	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	spinlock_t outstanding_lock;
	unsigned long outstanding_reqs;

	u32 capabilities;