Commit c99a7457 authored by Leon Romanovsky's avatar Leon Romanovsky Committed by Leon Romanovsky
Browse files

RDMA/mlx5: Remove not-used cache disable flag

During execution of mlx5_mkey_cache_cleanup(), there is a guarantee
that MRs are not registered and/or destroyed. This means that we don't
need the newly introduced cache disable flag.

Fixes: 374012b0 ("RDMA/mlx5: Fix mkey cache possible deadlock on cleanup")
Link: https://lore.kernel.org/r/c7e9c9f98c8ae4a7413d97d9349b29f5b0a23dbe.1695921626.git.leon@kernel.org


Signed-off-by: default avatarLeon Romanovsky <leonro@nvidia.com>
parent e0fe97ef
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -798,7 +798,6 @@ struct mlx5_mkey_cache {
	struct dentry		*fs_root;
	unsigned long		last_add;
	struct delayed_work	remove_ent_dwork;
	u8			disable: 1;
};

struct mlx5_ib_port_resources {
+0 −5
Original line number Diff line number Diff line
@@ -1026,7 +1026,6 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
		return;

	mutex_lock(&dev->cache.rb_lock);
	dev->cache.disable = true;
	for (node = rb_first(root); node; node = rb_next(node)) {
		ent = rb_entry(node, struct mlx5_cache_ent, node);
		xa_lock_irq(&ent->mkeys);
@@ -1830,10 +1829,6 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
	}

	mutex_lock(&cache->rb_lock);
	if (cache->disable) {
		mutex_unlock(&cache->rb_lock);
		return 0;
	}
	ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
	if (ent) {
		if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {