Commit 4c82b886 authored by Lu Baolu, committed by Joerg Roedel

iommu/vt-d: Allocate/register iopf queue for sva devices

This allocates and registers the iopf queue infrastructure for devices
that want to support I/O page faults for SVA.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20210520031531.712333-1-baolu.lu@linux.intel.com
Link: https://lore.kernel.org/r/20210610020115.1637656-11-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent ae7f09b1
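
[Editorial note, not part of the commit] For context, a device driver consumes what this patch wires up through the generic IOMMU feature API. Below is a minimal sketch, assuming the ~v5.13 interfaces from include/linux/iommu.h; the helper name example_bind_current_mm() and the trimmed error handling are illustrative only. The IOMMU_DEV_FEAT_SVA case is what reaches intel_iommu_enable_sva() in the diff, which adds the device to iommu->iopf_queue via iopf_queue_add_device().

#include <linux/iommu.h>
#include <linux/err.h>
#include <linux/sched.h>

static int example_bind_current_mm(struct device *dev, struct iommu_sva **handle)
{
	int ret;

	/* Enable IOPF first; on VT-d this only checks that the device can
	 * actually fault, see intel_iommu_dev_enable_feat() below. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	/* Reaches intel_iommu_enable_sva(), which registers the device on
	 * the per-IOMMU iopf queue allocated by intel_svm_enable_prq(). */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		goto disable_iopf;

	/* Share the current process address space with the device. */
	*handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(*handle)) {
		ret = PTR_ERR(*handle);
		goto disable_sva;
	}

	return 0;

disable_sva:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
disable_iopf:
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	return ret;
}

iommu_sva_get_pasid() on the returned handle yields the PASID the driver programs into its work descriptors; iommu_sva_unbind_device() and iommu_dev_disable_feature() undo the setup, the latter reaching intel_iommu_disable_sva() and iopf_queue_remove_device().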
+47 −19
@@ -46,6 +46,7 @@
 #include <asm/iommu.h>
 
 #include "../irq_remapping.h"
+#include "../iommu-sva-lib.h"
 #include "pasid.h"
 #include "cap_audit.h"

@@ -5338,6 +5339,34 @@ static int intel_iommu_disable_auxd(struct device *dev)
 	return 0;
 }
 
+static int intel_iommu_enable_sva(struct device *dev)
+{
+	struct device_domain_info *info = get_domain_info(dev);
+	struct intel_iommu *iommu = info->iommu;
+
+	if (!info || !iommu || dmar_disabled)
+		return -EINVAL;
+
+	if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
+		return -ENODEV;
+
+	if (intel_iommu_enable_pasid(iommu, dev))
+		return -ENODEV;
+
+	if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
+		return -EINVAL;
+
+	return iopf_queue_add_device(iommu->iopf_queue, dev);
+}
+
+static int intel_iommu_disable_sva(struct device *dev)
+{
+	struct device_domain_info *info = get_domain_info(dev);
+	struct intel_iommu *iommu = info->iommu;
+
+	return iopf_queue_remove_device(iommu->iopf_queue, dev);
+}
+
 /*
  * A PCI express designated vendor specific extended capability is defined
  * in the section 3.7 of Intel scalable I/O virtualization technical spec
@@ -5399,39 +5428,38 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
 static int
 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
 {
-	if (feat == IOMMU_DEV_FEAT_AUX)
+	switch (feat) {
+	case IOMMU_DEV_FEAT_AUX:
 		return intel_iommu_enable_auxd(dev);
 
-	if (feat == IOMMU_DEV_FEAT_IOPF)
+	case IOMMU_DEV_FEAT_IOPF:
 		return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;
 
-	if (feat == IOMMU_DEV_FEAT_SVA) {
-		struct device_domain_info *info = get_domain_info(dev);
-
-		if (!info)
-			return -EINVAL;
+	case IOMMU_DEV_FEAT_SVA:
+		return intel_iommu_enable_sva(dev);
 
-		if (intel_iommu_enable_pasid(info->iommu, dev))
-			return -ENODEV;
-
-		if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
-			return -EINVAL;
-
-		if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
-			return 0;
+	default:
+		return -ENODEV;
 	}
-
-	return -ENODEV;
 }
 
 static int
 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
 {
-	if (feat == IOMMU_DEV_FEAT_AUX)
+	switch (feat) {
+	case IOMMU_DEV_FEAT_AUX:
 		return intel_iommu_disable_auxd(dev);
 
-	return -ENODEV;
+	case IOMMU_DEV_FEAT_IOPF:
+		return 0;
+
+	case IOMMU_DEV_FEAT_SVA:
+		return intel_iommu_disable_sva(dev);
+
+	default:
+		return -ENODEV;
+	}
 }
 
 static bool
 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
+30 −7
@@ -84,6 +84,7 @@ svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)

 int intel_svm_enable_prq(struct intel_iommu *iommu)
 {
+	struct iopf_queue *iopfq;
 	struct page *pages;
 	int irq, ret;

@@ -100,13 +101,20 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
		goto free_prq;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "dmar%d-iopfq", iommu->seq_id);
	iopfq = iopf_queue_alloc(iommu->iopfq_name);
	if (!iopfq) {
		pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
		ret = -ENOMEM;
		goto free_hwirq;
	}
	iommu->iopf_queue = iopfq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
@@ -114,9 +122,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
 	if (ret) {
 		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
 		       iommu->name);
-		dmar_free_hwirq(irq);
-		iommu->pr_irq = 0;
-		goto err;
+		goto free_iopfq;
 	}
 	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
 	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
@@ -125,6 +131,18 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
 	init_completion(&iommu->prq_complete);
 
 	return 0;
+
+free_iopfq:
+	iopf_queue_free(iommu->iopf_queue);
+	iommu->iopf_queue = NULL;
+free_hwirq:
+	dmar_free_hwirq(irq);
+	iommu->pr_irq = 0;
+free_prq:
+	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+	iommu->prq = NULL;
+
+	return ret;
 }
 
 int intel_svm_finish_prq(struct intel_iommu *iommu)
@@ -139,6 +157,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 		iommu->pr_irq = 0;
 	}
 
+	if (iommu->iopf_queue) {
+		iopf_queue_free(iommu->iopf_queue);
+		iommu->iopf_queue = NULL;
+	}
+
 	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
 	iommu->prq = NULL;
 
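
[Editorial note, not part of the commit] The unwind labels added above follow the kernel's usual reverse-order cleanup idiom: intel_svm_enable_prq() now tears resources down in the opposite order they were set up, each goto target releasing only what had already succeeded. A standalone plain-C sketch of the same pattern (the three resources and all names below are illustrative, nothing here is from the patch):

#include <stdlib.h>

/* Acquire three resources in order; on failure, release only what was
 * already acquired, in reverse order, and clear the pointers -- the same
 * shape as the free_iopfq/free_hwirq/free_prq labels above. */
static int setup(char **prq, char **irq_cookie, char **iopfq)
{
	int ret = -1;

	*prq = malloc(64);		/* step 1: page request queue */
	if (!*prq)
		return ret;

	*irq_cookie = malloc(64);	/* step 2: interrupt vector */
	if (!*irq_cookie)
		goto free_prq;

	*iopfq = malloc(64);		/* step 3: iopf queue */
	if (!*iopfq)
		goto free_irq;

	return 0;

free_irq:
	free(*irq_cookie);
	*irq_cookie = NULL;
free_prq:
	free(*prq);
	*prq = NULL;
	return ret;
}

int main(void)
{
	char *prq = NULL, *irq_cookie = NULL, *iopfq = NULL;

	if (setup(&prq, &irq_cookie, &iopfq))
		return 1;

	free(iopfq);
	free(irq_cookie);
	free(prq);
	return 0;
}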
+2 −0
@@ -606,6 +606,8 @@ struct intel_iommu {
 	struct completion prq_complete;
 	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
 #endif
+	struct iopf_queue *iopf_queue;
+	unsigned char iopfq_name[16];
 	struct q_inval  *qi;            /* Queued invalidation info */
 	u32 *iommu_state; /* Store iommu states between suspend and resume.*/