Commit 68e31835 authored by Thomas Gleixner
Browse files

NTB/msi: Convert to msi_on_each_desc()



Replace the about to vanish iterators, make use of the filtering and take
the descriptor lock around the iteration.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20211206210748.683004012@linutronix.de
parent dc2b4532
Loading
Loading
Loading
Loading
+13 −6
Original line number Diff line number Diff line
@@ -108,8 +108,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
	if (!ntb->msi)
		return -EINVAL;

	desc = first_msi_entry(&ntb->pdev->dev);
	msi_lock_descs(&ntb->pdev->dev);
	desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
	addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
	msi_unlock_descs(&ntb->pdev->dev);

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -281,13 +283,15 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
				  const char *name, void *dev_id,
				  struct ntb_msi_desc *msi_desc)
{
	struct device *dev = &ntb->pdev->dev;
	struct msi_desc *entry;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	for_each_pci_msi_entry(entry, ntb->pdev) {
	msi_lock_descs(dev);
	msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
		if (irq_has_action(entry->irq))
			continue;

@@ -304,14 +308,17 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
		ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
		if (ret) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			return ret;
			goto unlock;
		}


		return entry->irq;
		ret = entry->irq;
		goto unlock;
	}
	ret = -ENODEV;

	return -ENODEV;
unlock:
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);