Commit 8a60ed35 authored by Or Har-Toov, committed by Ye Bin

RDMA/mlx5: Change check for cacheable mkeys

mainline inclusion
from mainline-v6.10-rc1
commit 8c1185fef68cc603b954fece2a434c9f851d6a86
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBPC5V
CVE: CVE-2025-21732

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=8c1185fef68cc603b954fece2a434c9f851d6a86

--------------------------------

umem can be NULL for user application mkeys in some cases, so it cannot
be used to check whether an mkey is cacheable. Check an explicit flag,
set when the mkey is created, instead. Also make sure that all mkeys
which are not returned to the cache are destroyed.
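
To make the change concrete, here is a minimal stand-alone sketch of the
new decision flow. All types and helpers below (mkey, mr, revoke_umr,
store_in_cache) are toy stand-ins invented for illustration, not driver
code; only the shape of the check mirrors the patch:

#include <stdio.h>

/* Toy stand-ins for the driver structures, trimmed to the relevant fields. */
struct cache_ent { int in_use; };

struct mkey {
	struct cache_ent *cache_ent;
	unsigned char cacheable : 1;	/* the new explicit flag */
};

struct mr {
	struct mkey mmkey;
	void *umem;			/* may be NULL for some user mkeys */
};

/* Fake hooks so the flow is exercisable; 0 means success, as in the kernel. */
static int revoke_umr(struct mr *mr)     { (void)mr; return 0; }
static int store_in_cache(struct mr *mr) { (void)mr; return 0; }
static int destroy_mkey(struct mr *mr)   { (void)mr; puts("mkey destroyed"); return 0; }

/* Shape of the new mlx5_revoke_mr(): decide on the flag, not on umem. */
static int revoke_mr(struct mr *mr)
{
	if (mr->mmkey.cacheable && !revoke_umr(mr) && !store_in_cache(mr))
		return 0;		/* returned to the cache */
	mr->mmkey.cache_ent = NULL;
	return destroy_mkey(mr);	/* anything not cached is destroyed */
}

int main(void)
{
	/* umem is NULL, yet the mkey is still handled correctly. */
	struct mr mr = { .mmkey = { .cacheable = 0 }, .umem = NULL };
	return revoke_mr(&mr);
}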

Fixes: dd1b913f ("RDMA/mlx5: Cache all user cacheable mkeys on dereg MR flow")
Signed-off-by: Or Har-Toov <ohartoov@nvidia.com>
Link: https://lore.kernel.org/r/2690bc5c6896bcb937f89af16a1ff0343a7ab3d0.1712140377.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>

Conflicts:
	drivers/infiniband/hw/mlx5/mr.c
[Fix context diff as commit 57e7071683ef not merged]
Signed-off-by: Ye Bin <yebin10@huawei.com>
parent 9d22d5b7
drivers/infiniband/hw/mlx5/mlx5_ib.h +1 −0
@@ -659,6 +659,7 @@ struct mlx5_ib_mkey {
 	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
 	struct mlx5r_cache_rb_key rb_key;
 	struct mlx5_cache_ent *cache_ent;
+	u8 cacheable : 1;
 };
 
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
drivers/infiniband/hw/mlx5/mr.c +22 −10
@@ -1192,6 +1192,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 		if (IS_ERR(mr))
 			return mr;
 		mr->mmkey.rb_key = rb_key;
+		mr->mmkey.cacheable = true;
 		return mr;
 	}
 
@@ -1202,6 +1203,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	mr->ibmr.pd = pd;
 	mr->umem = umem;
 	mr->page_shift = order_base_2(page_size);
+	mr->mmkey.cacheable = true;
 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
 
 	return mr;
@@ -1870,6 +1872,23 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 	return ret;
 }
 
+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+		return 0;
+
+	if (ent) {
+		xa_lock_irq(&ent->mkeys);
+		ent->in_use--;
+		mr->mmkey.cache_ent = NULL;
+		xa_unlock_irq(&ent->mkeys);
+	}
+	return destroy_mkey(dev, mr);
+}
+
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1915,16 +1934,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	}
 
 	/* Stop DMA */
-	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
-		if (mlx5r_umr_revoke_mr(mr) ||
-		    cache_ent_find_and_store(dev, mr))
-			mr->mmkey.cache_ent = NULL;
-
-	if (!mr->mmkey.cache_ent) {
-		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
-		if (rc)
-			return rc;
-	}
+	rc = mlx5_revoke_mr(mr);
+	if (rc)
+		return rc;
 
 	if (mr->umem) {
 		bool is_odp = is_odp_mr(mr);
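
For reference, the failure mode the new helper closes can be read off the
removed hunk: with the old guard, a cached mkey whose umem is NULL skipped
the revoke-and-store block (mr->umem is false) yet also skipped destruction
(mr->mmkey.cache_ent is still set), so the mkey was neither returned to the
cache nor destroyed. A toy reproduction of that control flow, reusing the
stand-in struct mr from the sketch above (old_stop_dma is likewise invented
for illustration):

/* Old flow, shape only: both branches are gated on the wrong state. */
static int old_stop_dma(struct mr *mr)
{
	int destroyed = 0;

	if (mr->umem /* && mlx5r_umr_can_load_pas(...) */) {
		/* revoke + cache_ent_find_and_store() were attempted here */
	}
	if (!mr->mmkey.cache_ent)
		destroyed = 1;
	return destroyed;	/* 0 here means the mkey was leaked */
}

/* With umem == NULL and cache_ent != NULL, old_stop_dma() returns 0: the
 * mkey is neither cached nor destroyed. The new mlx5_revoke_mr() keys the
 * decision off mmkey.cacheable and always falls through to destroy_mkey(),
 * so that case can no longer slip through. */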