Unverified Commit 69fa9590 authored by David Jander, committed by Mark Brown

spi: Ensure the io_mutex is held until spi_finalize_current_message()

This patch introduces a completion that is completed in
spi_finalize_current_message() and waited for in
__spi_pump_transfer_message(). This way, all manipulation of ctlr->cur_msg
is done with the io_mutex held and strictly ordered:
__spi_pump_transfer_message() does not return until
spi_finalize_current_message() is done using ctlr->cur_msg, and its
calling context only touches ctlr->cur_msg after that return.
Because of this, the spin-locks around ctlr->cur_msg can safely be dropped.

Signed-off-by: David Jander <david@protonic.nl>
Link: https://lore.kernel.org/r/20220621061234.3626638-11-david@protonic.nl
Signed-off-by: Mark Brown <broonie@kernel.org>
parent 72c5c59b
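
The completion-under-mutex pattern this commit describes can be sketched, for illustration only, as a self-contained module. This is not the SPI core code and not part of the patch; all demo_* names are made up, and a work item merely stands in for whatever context ends up calling spi_finalize_current_message():

/*
 * Illustrative sketch only: the submitter holds a mutex across the whole
 * lifetime of the in-flight item and uses a completion to wait until the
 * finalizing context is done with it, so no spinlock is needed around the
 * "current message" pointer. All demo_* names are hypothetical.
 */
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_ctlr {
	struct mutex io_mutex;			/* serializes message pumping */
	struct completion cur_msg_done;		/* signalled by the finalizer */
	int *cur_msg;				/* only touched with io_mutex held */
	struct work_struct finalize_work;
};

static struct demo_ctlr demo;
static int demo_payload = 42;

/* Plays the role of spi_finalize_current_message(): may run in another context. */
static void demo_finalize(struct work_struct *work)
{
	struct demo_ctlr *ctlr = container_of(work, struct demo_ctlr, finalize_work);

	pr_info("finalizing message with payload %d\n", *ctlr->cur_msg);
	/* Last access to cur_msg happens before the completion fires. */
	complete(&ctlr->cur_msg_done);
}

/* Plays the role of __spi_pump_messages()/__spi_pump_transfer_message(). */
static void demo_pump(struct demo_ctlr *ctlr, int *msg)
{
	mutex_lock(&ctlr->io_mutex);
	ctlr->cur_msg = msg;

	reinit_completion(&ctlr->cur_msg_done);
	schedule_work(&ctlr->finalize_work);		/* "transfer" the message */
	wait_for_completion(&ctlr->cur_msg_done);	/* finalizer is done with cur_msg */

	/* Safe without a spinlock: nobody else can reach cur_msg any more. */
	ctlr->cur_msg = NULL;
	mutex_unlock(&ctlr->io_mutex);
}

static int __init demo_init(void)
{
	mutex_init(&demo.io_mutex);
	init_completion(&demo.cur_msg_done);
	INIT_WORK(&demo.finalize_work, demo_finalize);

	demo_pump(&demo, &demo_payload);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo.finalize_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("completion-under-mutex ordering sketch");

In the patch itself the waiter is the message pump, which already holds ctlr->io_mutex, and the completer is spi_finalize_current_message(); the completion gives the pump a well-defined point after which it alone owns ctlr->cur_msg, which is what allows the queue_lock protection around it to go away.
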
drivers/spi/spi.c +14 −18
@@ -1613,11 +1613,14 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
 		}
 	}
 
+	reinit_completion(&ctlr->cur_msg_completion);
 	ret = ctlr->transfer_one_message(ctlr, msg);
 	if (ret) {
 		dev_err(&ctlr->dev,
 			"failed to transfer one message from queue\n");
 		return ret;
+	} else {
+		wait_for_completion(&ctlr->cur_msg_completion);
 	}
 
 	return 0;
@@ -1704,6 +1707,12 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+
+	if (!ret)
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+	ctlr->cur_msg = NULL;
+	ctlr->fallback = false;
+
 	mutex_unlock(&ctlr->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
@@ -1897,12 +1906,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 {
 	struct spi_transfer *xfer;
 	struct spi_message *mesg;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
 	mesg = ctlr->cur_msg;
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
@@ -1936,20 +1942,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 
 	mesg->prepared = false;
 
-	if (!mesg->sync) {
-		/*
-		 * This message was sent via the async message queue. Handle
-		 * the queue and kick the worker thread to do the
-		 * idling/shutdown or send the next message if needed.
-		 */
-		spin_lock_irqsave(&ctlr->queue_lock, flags);
-		WARN(ctlr->cur_msg != mesg,
-			"Finalizing queued message that is not the current head of queue!");
-		ctlr->cur_msg = NULL;
-		ctlr->fallback = false;
-		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
-		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-	}
+	complete(&ctlr->cur_msg_completion);
 
 	trace_spi_message_done(mesg);

@@ -3036,6 +3029,7 @@ int spi_register_controller(struct spi_controller *ctlr)
 	}
 	ctlr->bus_lock_flag = 0;
 	init_completion(&ctlr->xfer_completion);
+	init_completion(&ctlr->cur_msg_completion);
 	if (!ctlr->max_dma_len)
 		ctlr->max_dma_len = INT_MAX;
 
@@ -3962,6 +3956,9 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
 	if (ret)
 		goto out;
 
+	ctlr->cur_msg = NULL;
+	ctlr->fallback = false;
+
 	if (!was_busy) {
 		kfree(ctlr->dummy_rx);
 		ctlr->dummy_rx = NULL;
@@ -4013,7 +4010,6 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 	 * will catch those cases.
 	 */
 	if (READ_ONCE(ctlr->queue_empty)) {
-		message->sync = true;
 		message->actual_length = 0;
 		message->status = -EINPROGRESS;
 
include/linux/spi/spi.h +2 −4
@@ -384,6 +384,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @queue_lock: spinlock to syncronise access to message queue
  * @queue: message queue
  * @cur_msg: the currently in-flight message
+ * @cur_msg_completion: a completion for the current in-flight message
  * @cur_msg_mapped: message has been mapped for DMA
  * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip
  *           selected
@@ -615,6 +616,7 @@ struct spi_controller {
 	spinlock_t			queue_lock;
 	struct list_head		queue;
 	struct spi_message		*cur_msg;
+	struct completion               cur_msg_completion;
 	bool				busy;
 	bool				running;
 	bool				rt;
@@ -989,7 +991,6 @@ struct spi_transfer {
  * @state: for use by whichever driver currently owns the message
  * @resources: for resource management when the spi message is processed
  * @prepared: spi_prepare_message was called for the this message
- * @sync: this message took the direct sync path skipping the async queue
  *
  * A @spi_message is used to execute an atomic sequence of data transfers,
  * each represented by a struct spi_transfer.  The sequence is "atomic"
@@ -1042,9 +1043,6 @@ struct spi_message {
 
 	/* spi_prepare_message was called for this message */
 	bool			prepared;
-
-	/* this message is skipping the async queue */
-	bool			sync;
 };
 
 static inline void spi_message_init_no_memset(struct spi_message *m)