Commit 2d0b41a3 authored by Shay Drory, committed by Saeed Mahameed

net/mlx5: Refcount mlx5_irq with integer



Currently, all access to mlx5 IRQs is done under a lock. Hence, there
is no reason to keep a kref in struct mlx5_irq.
Switch it to an integer.
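
The pattern, as a minimal sketch with hypothetical names (obj/obj_pool
are illustrative, not the mlx5 structures): once every get/put is
serialized by one mutex, struct kref's atomic machinery is redundant,
and a plain int with an open-coded zero check is enough.

#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj_pool {
	struct mutex lock;	/* serializes all get/put on pool objects */
};

struct obj {
	struct obj_pool *pool;
	int refcount;		/* was: struct kref kref; */
};

static void obj_release(struct obj *o)
{
	kfree(o);		/* called with pool->lock held, refcount == 0 */
}

static int obj_get(struct obj *o)
{
	int ret = 0;

	mutex_lock(&o->pool->lock);
	/* like kref_get_unless_zero(): never resurrect a dying object */
	if (!WARN_ON_ONCE(!o->refcount)) {
		o->refcount++;
		ret = 1;
	}
	mutex_unlock(&o->pool->lock);
	return ret;
}

static void obj_put(struct obj *o)
{
	struct obj_pool *pool = o->pool;	/* o may be freed below */

	mutex_lock(&pool->lock);
	if (!--o->refcount)
		obj_release(o);
	mutex_unlock(&pool->lock);
}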

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 68fefb70
+44 −21
@@ -32,7 +32,7 @@ struct mlx5_irq {
 	cpumask_var_t mask;
 	char name[MLX5_MAX_IRQ_NAME];
 	struct mlx5_irq_pool *pool;
-	struct kref kref;
+	int refcount;
 	u32 index;
 	int irqn;
 };
@@ -138,9 +138,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
 	return ret;
 }
 
-static void irq_release(struct kref *kref)
+static void irq_release(struct mlx5_irq *irq)
 {
-	struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
 	struct mlx5_irq_pool *pool = irq->pool;
 
 	xa_erase(&pool->irqs, irq->index);
@@ -159,10 +158,31 @@ static void irq_put(struct mlx5_irq *irq)
 	struct mlx5_irq_pool *pool = irq->pool;
 
 	mutex_lock(&pool->lock);
-	kref_put(&irq->kref, irq_release);
+	irq->refcount--;
+	if (!irq->refcount)
+		irq_release(irq);
 	mutex_unlock(&pool->lock);
 }
 
+static int irq_get_locked(struct mlx5_irq *irq)
+{
+	lockdep_assert_held(&irq->pool->lock);
+	if (WARN_ON_ONCE(!irq->refcount))
+		return 0;
+	irq->refcount++;
+	return 1;
+}
+
+static int irq_get(struct mlx5_irq *irq)
+{
+	int err;
+
+	mutex_lock(&irq->pool->lock);
+	err = irq_get_locked(irq);
+	mutex_unlock(&irq->pool->lock);
+	return err;
+}
+
 static irqreturn_t irq_int_handler(int irq, void *nh)
 {
 	atomic_notifier_call_chain(nh, 0, NULL);
@@ -214,7 +234,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 		err = -ENOMEM;
 		goto err_cpumask;
 	}
-	kref_init(&irq->kref);
+	irq->refcount = 1;
 	irq->index = i;
 	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
 	if (err) {
@@ -235,18 +255,18 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 
 int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 {
-	int err;
+	int ret;
 
-	err = kref_get_unless_zero(&irq->kref);
-	if (WARN_ON_ONCE(!err))
+	ret = irq_get(irq);
+	if (!ret)
 		/* Something very bad happens here, we are enabling EQ
 		 * on non-existing IRQ.
 		 */
 		return -ENOENT;
-	err = atomic_notifier_chain_register(&irq->nh, nb);
-	if (err)
+	ret = atomic_notifier_chain_register(&irq->nh, nb);
+	if (ret)
 		irq_put(irq);
-	return err;
+	return ret;
 }
 
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
@@ -301,10 +321,9 @@ static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
 	xa_for_each_range(&pool->irqs, index, iter, start, end) {
 		if (!cpumask_equal(iter->mask, affinity))
 			continue;
-		if (kref_read(&iter->kref) < pool->min_threshold)
+		if (iter->refcount < pool->min_threshold)
 			return iter;
-		if (!irq || kref_read(&iter->kref) <
-		    kref_read(&irq->kref))
+		if (!irq || iter->refcount < irq->refcount)
 			irq = iter;
 	}
 	return irq;
@@ -319,7 +338,7 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
 	mutex_lock(&pool->lock);
 	least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
 	if (least_loaded_irq &&
-	    kref_read(&least_loaded_irq->kref) < pool->min_threshold)
+	    least_loaded_irq->refcount < pool->min_threshold)
 		goto out;
 	new_irq = irq_pool_create_irq(pool, affinity);
 	if (IS_ERR(new_irq)) {
@@ -337,11 +356,11 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
 	least_loaded_irq = new_irq;
 	goto unlock;
 out:
-	kref_get(&least_loaded_irq->kref);
-	if (kref_read(&least_loaded_irq->kref) > pool->max_threshold)
+	irq_get_locked(least_loaded_irq);
+	if (least_loaded_irq->refcount > pool->max_threshold)
 		mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
 			      least_loaded_irq->irqn, pool->name,
-			      kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+			      least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 unlock:
 	mutex_unlock(&pool->lock);
 	return least_loaded_irq;
@@ -357,7 +376,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 	mutex_lock(&pool->lock);
 	irq = xa_load(&pool->irqs, vecidx);
 	if (irq) {
-		kref_get(&irq->kref);
+		irq_get_locked(irq);
 		goto unlock;
 	}
 	irq = irq_request(pool, vecidx);
@@ -424,7 +443,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
 		      irq->irqn, cpumask_pr_args(affinity),
-		      kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }
 
@@ -456,8 +475,12 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
 	struct mlx5_irq *irq;
 	unsigned long index;
 
+	/* There are cases in which we are destroying the irq_table before
+	 * freeing all the IRQs, fast teardown for example. Hence, free the
+	 * irqs which might not have been freed.
+	 */
 	xa_for_each(&pool->irqs, index, irq)
-		irq_release(&irq->kref);
+		irq_release(irq);
 	xa_destroy(&pool->irqs);
 	kvfree(pool);
 }
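
As an aside, irq_get_locked()/irq_get() follow the usual kernel
convention for split lock contexts: the _locked variant asserts, via
lockdep on debug builds, that the caller already holds the mutex, while
the plain variant takes the lock itself and delegates. A minimal
sketch, again with hypothetical names:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct thing_pool {
	struct mutex lock;
	struct list_head list;
};

struct thing {
	struct list_head node;
};

/* Caller must hold pool->lock; lockdep verifies on debug builds. */
static void thing_add_locked(struct thing_pool *pool, struct thing *t)
{
	lockdep_assert_held(&pool->lock);
	list_add(&t->node, &pool->list);
}

/* Unlocked entry point: takes the lock and delegates. */
static void thing_add(struct thing_pool *pool, struct thing *t)
{
	mutex_lock(&pool->lock);
	thing_add_locked(pool, t);
	mutex_unlock(&pool->lock);
}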