Commit 7ecbf253 authored by Thierry Reding, committed by Krzysztof Kozlowski

iommu/arm-smmu: tegra: Detect number of instances at runtime

Parse the reg property in the device tree and detect the number of
instances represented by a device tree node. This is subsequently needed
to support single-instance SMMUs with the Tegra implementation, because
additional programming is needed to properly configure the SID override
registers in the memory controller.
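
For reference, a minimal sketch of the detection approach, assuming one
MEM entry in the device tree "reg" property per SMMU instance; the helper
name and its "max" parameter are illustrative, not part of this patch:

	#include <linux/platform_device.h>

	/*
	 * Illustrative helper: each instance contributes one MEM entry to
	 * the "reg" property, so counting the platform MEM resources gives
	 * the number of instances, capped at "max".
	 */
	static unsigned int count_smmu_instances(struct platform_device *pdev,
						 unsigned int max)
	{
		unsigned int n = 0;

		while (n < max && platform_get_resource(pdev, IORESOURCE_MEM, n))
			n++;

		return n;
	}

The patch below open-codes this logic in nvidia_smmu_impl_init(), bumping
num_instances once per successfully ioremapped instance.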

Signed-off-by: Thierry Reding <treding@nvidia.com>
Link: https://lore.kernel.org/r/20210603164632.1000458-5-thierry.reding@gmail.com
Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
parent 4287861d
 1 file changed, 41 insertions(+), 16 deletions(-)

--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -20,13 +20,19 @@
  * The third instance usage is through standard arm-smmu driver itself and
  * is out of scope of this implementation.
  */
-#define NUM_SMMU_INSTANCES 2
+#define MAX_SMMU_INSTANCES 2
 
 struct nvidia_smmu {
 	struct arm_smmu_device smmu;
-	void __iomem *bases[NUM_SMMU_INSTANCES];
+	void __iomem *bases[MAX_SMMU_INSTANCES];
+	unsigned int num_instances;
 };
 
+static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
+{
+	return container_of(smmu, struct nvidia_smmu, smmu);
+}
+
 static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
 					     unsigned int inst, int page)
 {
@@ -47,9 +53,10 @@ static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
 static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
 				  int page, int offset, u32 val)
 {
+	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
 	unsigned int i;
 
-	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+	for (i = 0; i < nvidia->num_instances; i++) {
 		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
 
 		writel_relaxed(val, reg);
@@ -67,9 +74,10 @@ static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
 static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
 				    int page, int offset, u64 val)
 {
+	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
 	unsigned int i;
 
-	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+	for (i = 0; i < nvidia->num_instances; i++) {
 		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
 
 		writeq_relaxed(val, reg);
@@ -79,6 +87,7 @@ static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
 static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 				 int sync, int status)
 {
+	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
 	unsigned int delay;
 
 	arm_smmu_writel(smmu, page, sync, 0);
@@ -90,7 +99,7 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 			u32 val = 0;
 			unsigned int i;
 
-			for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+			for (i = 0; i < nvidia->num_instances; i++) {
 				void __iomem *reg;
 
 				reg = nvidia_smmu_page(smmu, i, page) + status;
@@ -112,9 +121,10 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
 
 static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
 {
+	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
 	unsigned int i;
 
-	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+	for (i = 0; i < nvidia->num_instances; i++) {
 		u32 val;
 		void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
 				    ARM_SMMU_GR0_sGFSR;
@@ -157,8 +167,9 @@ static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
 	unsigned int inst;
 	irqreturn_t ret = IRQ_NONE;
 	struct arm_smmu_device *smmu = dev;
+	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
 
-	for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+	for (inst = 0; inst < nvidia->num_instances; inst++) {
 		irqreturn_t irq_ret;
 
 		irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
@@ -202,11 +213,13 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
 	struct arm_smmu_device *smmu;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain;
+	struct nvidia_smmu *nvidia;
 
 	smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
 	smmu = smmu_domain->smmu;
+	nvidia = to_nvidia_smmu(smmu);
 
-	for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+	for (inst = 0; inst < nvidia->num_instances; inst++) {
 		irqreturn_t irq_ret;
 
 		/*
@@ -235,12 +248,16 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.context_fault = nvidia_smmu_context_fault,
 };
 
+static const struct arm_smmu_impl nvidia_smmu_single_impl = {
+};
+
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
 {
 	struct resource *res;
 	struct device *dev = smmu->dev;
 	struct nvidia_smmu *nvidia_smmu;
 	struct platform_device *pdev = to_platform_device(dev);
+	unsigned int i;
 
 	nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
 	if (!nvidia_smmu)
@@ -248,15 +265,23 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
 
 	/* Instance 0 is ioremapped by arm-smmu.c. */
 	nvidia_smmu->bases[0] = smmu->base;
+	nvidia_smmu->num_instances++;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res)
-		return ERR_PTR(-ENODEV);
+	for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res)
+			break;
 
-	nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
-	if (IS_ERR(nvidia_smmu->bases[1]))
-		return ERR_CAST(nvidia_smmu->bases[1]);
+		nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
+		if (IS_ERR(nvidia_smmu->bases[i]))
+			return ERR_CAST(nvidia_smmu->bases[i]);
+
+		nvidia_smmu->num_instances++;
+	}
+
+	if (nvidia_smmu->num_instances == 1)
+		nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
+	else
+		nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
 
 	return &nvidia_smmu->smmu;