Commit 235a25fe authored by Eli Cohen, committed by Saeed Mahameed
Browse files

net/mlx5: Modify struct mlx5_irq to use struct msi_map



Use the standard struct msi_map to store the vector number and irq
number pair in struct mlx5_irq.

Signed-off-by: Eli Cohen <elic@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
parent 40a252c1
Loading
Loading
Loading
Loading
+15 −16
Original line number Diff line number Diff line
@@ -29,8 +29,7 @@ struct mlx5_irq {
 	char name[MLX5_MAX_IRQ_NAME];
 	struct mlx5_irq_pool *pool;
 	int refcount;
-	u32 index;
-	int irqn;
+	struct msi_map map;
 };

struct mlx5_irq_table {
@@ -128,14 +127,14 @@ static void irq_release(struct mlx5_irq *irq)
 {
 	struct mlx5_irq_pool *pool = irq->pool;
 
-	xa_erase(&pool->irqs, irq->index);
+	xa_erase(&pool->irqs, irq->map.index);
 	/* free_irq requires that affinity_hint and rmap will be cleared
 	 * before calling it. This is why there is asymmetry with set_rmap
 	 * which should be called after alloc_irq but before request_irq.
 	 */
-	irq_update_affinity_hint(irq->irqn, NULL);
+	irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
-	free_irq(irq->irqn, &irq->nh);
+	free_irq(irq->map.virq, &irq->nh);
 	kfree(irq);
 }

@@ -217,7 +216,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
 	if (!irq)
 		return ERR_PTR(-ENOMEM);
-	irq->irqn = pci_irq_vector(dev->pdev, i);
+	irq->map.virq = pci_irq_vector(dev->pdev, i);
 	if (!mlx5_irq_pool_is_sf_pool(pool))
 		irq_set_name(pool, name, i);
 	else
@@ -225,7 +224,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
 	snprintf(irq->name, MLX5_MAX_IRQ_NAME,
 		 "%s@pci:%s", name, pci_name(dev->pdev));
-	err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
+	err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
 			  &irq->nh);
 	if (err) {
 		mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
@@ -238,23 +237,23 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	if (affinity) {
 		cpumask_copy(irq->mask, affinity);
-		irq_set_affinity_and_hint(irq->irqn, irq->mask);
+		irq_set_affinity_and_hint(irq->map.virq, irq->mask);
 	}
 	irq->pool = pool;
 	irq->refcount = 1;
-	irq->index = i;
-	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
+	irq->map.index = i;
+	err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
 	if (err) {
 		mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
-			      irq->index, err);
+			      irq->map.index, err);
 		goto err_xa;
 	}
 	return irq;
 err_xa:
-	irq_update_affinity_hint(irq->irqn, NULL);
+	irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
 err_cpumask:
-	free_irq(irq->irqn, &irq->nh);
+	free_irq(irq->map.virq, &irq->nh);
 err_req_irq:
 	kfree(irq);
 	return ERR_PTR(err);
@@ -292,7 +291,7 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
 
 int mlx5_irq_get_index(struct mlx5_irq *irq)
 {
-	return irq->index;
+	return irq->map.index;
 }
 
 /* irq_pool API */
@@ -364,7 +363,7 @@ static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
 	int i;
 
 	for (i = 0; i < nirqs; i++) {
-		synchronize_irq(irqs[i]->irqn);
+		synchronize_irq(irqs[i]->map.virq);
 		mlx5_irq_put(irqs[i]);
 	}
 }
@@ -433,7 +432,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	if (IS_ERR(irq))
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
-		      irq->irqn, cpumask_pr_args(affinity),
+		      irq->map.virq, cpumask_pr_args(affinity),
 		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }