Commit 9cf62d91 authored by Tetsuo Handa's avatar Tetsuo Handa Committed by Jason Gunthorpe
Browse files

RDMA/mlx4: Avoid flush_scheduled_work() usage

Flushing system-wide workqueues is dangerous and will be forbidden.
Replace system_wq with local cm_wq.

Link: https://lore.kernel.org/r/22f7183b-cc16-5a34-e879-7605f5efc6e6@I-love.SAKURA.ne.jp


Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 549f39a5
Loading
Loading
Loading
Loading
+22 −7
Original line number Diff line number Diff line
@@ -80,6 +80,7 @@ struct cm_req_msg {
	union ib_gid primary_path_sgid;
};

static struct workqueue_struct *cm_wq;

static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
@@ -288,10 +289,10 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
	/*make sure that there is no schedule inside the scheduled work.*/
	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		queue_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
		/* Adjust timeout if already scheduled */
		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		mod_delayed_work(cm_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
@@ -370,7 +371,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
			ret =  xa_err(item);
		else
			/* If a retry, adjust delayed work */
			mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
			mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		goto err_or_exists;
	}
	xa_unlock(&sriov->xa_rej_tmout);
@@ -393,7 +394,7 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
		return xa_err(old);
	}

	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	queue_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

@@ -500,7 +501,7 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(system_wq, &item->timeout, 0);
			mod_delayed_work(cm_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
@@ -508,7 +509,7 @@ static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
	xa_unlock(&sriov->xa_rej_tmout);

	if (flush_needed) {
		flush_scheduled_work();
		flush_workqueue(cm_wq);
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	}
@@ -540,7 +541,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */
		flush_workqueue(cm_wq); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases*/
	spin_lock(&sriov->id_map_lock);
@@ -587,3 +588,17 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)

	rej_tmout_xa_cleanup(sriov, slave);
}

/*
 * Create the module-local CM workqueue used for delayed id-map and
 * rej-timeout cleanup work (replaces the former use of system_wq).
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated.
 */
int mlx4_ib_cm_init(void)
{
	cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
	return cm_wq ? 0 : -ENOMEM;
}

/*
 * Tear down the module-local CM workqueue created by mlx4_ib_cm_init().
 * destroy_workqueue() drains any pending work before freeing the queue;
 * callers must ensure no new work is queued after this point.
 */
void mlx4_ib_cm_destroy(void)
{
	destroy_workqueue(cm_wq);
}
+9 −1
Original line number Diff line number Diff line
@@ -3307,10 +3307,14 @@ static int __init mlx4_ib_init(void)
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	err = mlx4_ib_cm_init();
	if (err)
		goto clean_wq;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_cm;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;
@@ -3320,6 +3324,9 @@ static int __init mlx4_ib_init(void)
clean_mcg:
	mlx4_ib_mcg_destroy();

clean_cm:
	mlx4_ib_cm_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
@@ -3329,6 +3336,7 @@ static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	mlx4_ib_cm_destroy();
	destroy_workqueue(wq);
}

+3 −0
Original line number Diff line number Diff line
@@ -937,4 +937,7 @@ mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
				       int *num_of_mtts);

int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);

#endif /* MLX4_IB_H */