Commit b05d4e48 authored by Douglas Gilbert, committed by Martin K. Petersen
Browse files

scsi: scsi_debug: Refine sdebug_blk_mq_poll()

Refine the sdebug_blk_mq_poll() function so it only takes the spinlock on
the queue when it can see one or more requests with the in_use bitmap flag
set.

Link: https://lore.kernel.org/r/20220109012853.301953-5-dgilbert@interlog.com


Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 7d5a129b
Loading
Loading
Loading
Loading
+16 −5
Original line number Diff line number Diff line
@@ -7396,6 +7396,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	bool first;
	bool retiring = false;
	bool locked = false;
	int num_entries = 0;
	unsigned int qc_idx = 0;
	unsigned long iflags;
@@ -7407,16 +7408,23 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
	struct sdebug_defer *sd_dp;

	sqp = sdebug_q_arr + queue_num;
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
	if (qc_idx >= sdebug_max_queue)
		return 0;

	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
		if (!locked) {
			spin_lock_irqsave(&sqp->qc_lock, iflags);
			locked = true;
		}
		if (first) {
			qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
			first = false;
			if (!test_bit(qc_idx, sqp->in_use_bm))
				continue;
		} else {
			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
		}
		if (unlikely(qc_idx >= sdebug_max_queue))
		if (qc_idx >= sdebug_max_queue)
			break;

		sqcp = &sqp->qc_arr[qc_idx];
@@ -7465,10 +7473,13 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
		}
		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		locked = false;
		scsi_done(scp); /* callback to mid level */
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		num_entries++;
		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
			break;	/* if no more then exit without retaking spinlock */
	}
	if (locked)
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);