Commit a250c23f authored by Robin Murphy's avatar Robin Murphy Committed by Joerg Roedel
Browse files

iommu: remove DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE



Instead make the global iommu_dma_strict parameter in iommu.c canonical by
exporting helpers to get and set it and use those directly in the drivers.

This makes sure that the iommu.strict parameter also works for the AMD and
Intel IOMMU drivers on x86.  As those default to lazy flushing a new
IOMMU_CMD_LINE_STRICT is used to turn the value into a tristate to
represent the default if not overridden by an explicit parameter.

[ported on top of the other iommu_attr changes and added a few small
 missing bits]

Signed-off-by: default avatarRobin Murphy <robin.murphy@arm.com>
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20210401155256.298656-19-hch@lst.de


Signed-off-by: default avatarJoerg Roedel <jroedel@suse.de>
parent 3189713a
Loading
Loading
Loading
Loading
+1 −22
Original line number Diff line number Diff line
@@ -1771,26 +1771,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
	return acpihid_device_group(dev);
}

/*
 * Legacy iommu_attr getter, removed by this commit: the flush-queue
 * setting is now read through the global iommu_get_dma_strict() helper
 * instead of a per-domain attribute.
 */
static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
		enum iommu_attr attr, void *data)
{
	switch (domain->type) {
	case IOMMU_DOMAIN_UNMANAGED:
		/* Unmanaged domains expose no DMA-API attributes. */
		return -ENODEV;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			/* Flush queue is in use iff strict unmap flushing is off. */
			*(int *)data = !amd_iommu_unmap_flush;
			return 0;
		default:
			return -ENODEV;
		}
		break;
	default:
		return -EINVAL;
	}
}

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
@@ -1855,7 +1835,7 @@ int __init amd_iommu_init_dma_ops(void)
		pr_info("IO/TLB flush on unmap enabled\n");
	else
		pr_info("Lazy IO/TLB flushing enabled\n");

	iommu_set_dma_strict(amd_iommu_unmap_flush);
	return 0;

}
@@ -2257,7 +2237,6 @@ const struct iommu_ops amd_iommu_ops = {
	.release_device = amd_iommu_release_device,
	.probe_finalize = amd_iommu_probe_finalize,
	.device_group = amd_iommu_device_group,
	.domain_get_attr = amd_iommu_domain_get_attr,
	.get_resv_regions = amd_iommu_get_resv_regions,
	.put_resv_regions = generic_iommu_put_resv_regions,
	.is_attach_deferred = amd_iommu_is_attach_deferred,
+1 −49
Original line number Diff line number Diff line
@@ -2040,7 +2040,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
	if (!iommu_get_dma_strict(domain))
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2549,52 +2549,6 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
	return group;
}

/*
 * Legacy iommu_attr getter, removed by this commit: the per-domain
 * non_strict flag is replaced by the global iommu_get_dma_strict()
 * helper (see the arm_smmu_domain_finalise hunk above).
 */
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (domain->type) {
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			/* Report the domain's lazy-flush (non-strict) state. */
			*(int *)data = smmu_domain->non_strict;
			return 0;
		default:
			return -ENODEV;
		}
		break;
	default:
		return -EINVAL;
	}
}

/*
 * Legacy iommu_attr setter, removed by this commit: strict/lazy flushing
 * is now controlled globally via iommu_set_dma_strict() rather than
 * per-domain through DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE.
 */
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/* Serialise against concurrent domain finalisation. */
	mutex_lock(&smmu_domain->init_mutex);

	switch (domain->type) {
	case IOMMU_DOMAIN_DMA:
		switch(attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			/* Record the requested lazy-flush (non-strict) mode. */
			smmu_domain->non_strict = *(int *)data;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_enable_nesting(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2707,8 +2661,6 @@ static struct iommu_ops arm_smmu_ops = {
	.probe_device		= arm_smmu_probe_device,
	.release_device		= arm_smmu_release_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.enable_nesting		= arm_smmu_enable_nesting,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
+0 −1
Original line number Diff line number Diff line
@@ -677,7 +677,6 @@ struct arm_smmu_domain {
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	bool				non_strict;
	atomic_t			nr_ats_masters;

	enum arm_smmu_domain_stage	stage;
+3 −24
Original line number Diff line number Diff line
@@ -761,6 +761,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
		.iommu_dev	= smmu->dev,
	};

	if (!iommu_get_dma_strict(domain))
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
		if (ret)
@@ -1499,18 +1502,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
			return -ENODEV;
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: {
			bool non_strict = smmu_domain->pgtbl_cfg.quirks &
					  IO_PGTABLE_QUIRK_NON_STRICT;
			*(int *)data = non_strict;
			return 0;
		}
		default:
			return -ENODEV;
		}
		break;
	default:
		return -EINVAL;
	}
@@ -1557,18 +1548,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
			ret = -ENODEV;
		}
		break;
	case IOMMU_DOMAIN_DMA:
		switch (attr) {
		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
			if (*(int *)data)
				smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
			else
				smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT;
			break;
		default:
			ret = -ENODEV;
		}
		break;
	default:
		ret = -EINVAL;
	}
+2 −7
Original line number Diff line number Diff line
@@ -306,10 +306,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */

	domain->ops->flush_iotlb_all(domain);
}

@@ -336,7 +333,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;
@@ -373,8 +369,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && (!dev || !dev_is_untrusted(dev)) &&
	    !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) &&
	    attr) {
	    domain->ops->flush_iotlb_all && !iommu_get_dma_strict(domain)) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					  iommu_dma_entry_dtor))
			pr_warn("iova flush queue initialization failed\n");
Loading