Unverified Commit 6aaa8434 authored by Arnd Bergmann
Browse files

Merge tag 'scmi-fixes-5.15' of...

Merge tag 'scmi-fixes-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/fixes

SCMI fixes for v5.15

A few fixes addressing:
- Kconfig dependency between VIRTIO and ARM_SCMI_PROTOCOL
- Link-time error with __exit annotation for virtio_scmi_exit
- Unnecessary nested irqsave/irqrestore spinlocks in virtio transport
- Missing SMP barriers to protect accesses to SCMI virtio device

* tag 'scmi-fixes-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  firmware: arm_scmi: Add proper barriers to scmi virtio device
  firmware: arm_scmi: Simplify spinlocks in virtio transport
  firmware: arm_scmi: Remove __exit annotation
  firmware: arm_scmi: Fix virtio transport Kconfig dependency

Link: https://lore.kernel.org/r/20211007102822.27886-1-sudeep.holla@arm.com


Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parents 3c7f58b3 bf1acf80
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -68,7 +68,7 @@ config ARM_SCMI_TRANSPORT_SMC

config ARM_SCMI_TRANSPORT_VIRTIO
	bool "SCMI transport based on VirtIO"
	depends on VIRTIO
	depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
	select ARM_SCMI_HAVE_TRANSPORT
	select ARM_SCMI_HAVE_MSG
	help
+31 −13
Original line number Diff line number Diff line
@@ -110,18 +110,16 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
	if (vioch->is_rx) {
		scmi_vio_feed_vq_rx(vioch, msg);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		/* Here IRQs are assumed to be already disabled by the caller */
		spin_lock(&vioch->lock);
		list_add(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->lock, flags);
		spin_unlock(&vioch->lock);
	}
}

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long ready_flags;
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
@@ -140,7 +138,8 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
			goto unlock_ready_out;
		}

		spin_lock_irqsave(&vioch->lock, flags);
		/* IRQs already disabled here no need to irqsave */
		spin_lock(&vioch->lock);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
@@ -151,7 +150,7 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
				goto unlock_out;
			cb_enabled = true;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
		spin_unlock(&vioch->lock);

		if (msg) {
			msg->rx_len = length;
@@ -161,11 +160,18 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release ready_lock and re-enable IRQs between loop iterations
		 * to allow virtio_chan_free() to possibly kick in and set the
		 * flag vioch->ready to false even in between processing of
		 * messages, so as to force outstanding messages to be ignored
		 * when system is shutting down.
		 */
		spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
	}

unlock_out:
	spin_unlock_irqrestore(&vioch->lock, flags);
	spin_unlock(&vioch->lock);
unlock_ready_out:
	spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
}
@@ -384,8 +390,11 @@ static int scmi_vio_probe(struct virtio_device *vdev)
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtiO device allowed */
	if (scmi_vdev)
		return -EINVAL;
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
@@ -428,16 +437,25 @@ static int scmi_vio_probe(struct virtio_device *vdev)
	}

	vdev->priv = channels;
	scmi_vdev = vdev;
	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	return 0;
}

static void scmi_vio_remove(struct virtio_device *vdev)
{
	/*
	 * Once we get here, virtio_chan_free() will have already been called by
	 * the SCMI core for any existing channel and, as a consequence, all the
	 * virtio channels will have been already marked NOT ready, causing any
	 * outstanding message on any vqueue to be ignored by complete_cb: now
	 * we can just stop processing buffers and destroy the vqueues.
	 */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
	scmi_vdev = NULL;
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

static int scmi_vio_validate(struct virtio_device *vdev)
@@ -476,7 +494,7 @@ static int __init virtio_scmi_init(void)
	return register_virtio_driver(&virtio_scmi_driver);
}

static void __exit virtio_scmi_exit(void)
static void virtio_scmi_exit(void)
{
	unregister_virtio_driver(&virtio_scmi_driver);
}