Unverified Commit cbd7cd48 authored by openeuler-ci-bot, committed by Gitee
Browse files

!15308 CVE-2025-21732

Merge Pull Request from: @ci-robot 
 
PR sync from: Ye Bin <yebin10@huawei.com>
https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/7Q2EE3U6F5FW2TGYOIJUV6ONLXAP4UBW/ 
From: Ye Bin <yebin@huaweicloud.com>

Jason Gunthorpe (1):
  RDMA/mlx5: Ensure created mkeys always have a populated rb_key

Or Har-Toov (1):
  RDMA/mlx5: Change check for cacheable mkeys

Yishai Hadas (1):
  RDMA/mlx5: Fix a race for an ODP MR which leads to CQE with error

 
https://gitee.com/src-openeuler/kernel/issues/IBPC5V 
 
Link: https://gitee.com/openeuler/kernel/pulls/15308

 

Reviewed-by: Zhang Changzhong <zhangchangzhong@huawei.com>
Reviewed-by: Zhang Peng <zhangpeng362@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 83f86d69 83262ab5
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -659,6 +659,7 @@ struct mlx5_ib_mkey {
	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
	struct mlx5r_cache_rb_key rb_key;
	struct mlx5_cache_ent *cache_ent;
	u8 cacheable : 1;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+36 −10
Original line number Diff line number Diff line
@@ -784,6 +784,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
	}
	mr->mmkey.cache_ent = ent;
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->mmkey.rb_key = ent->rb_key;
	mr->mmkey.cacheable = true;
	init_waitqueue_head(&mr->mmkey.wait);
	return mr;
}
@@ -1192,6 +1194,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
		if (IS_ERR(mr))
			return mr;
		mr->mmkey.rb_key = rb_key;
		mr->mmkey.cacheable = true;
		return mr;
	}

@@ -1870,6 +1873,36 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
	return ret;
}

/*
 * mlx5_revoke_mr() - revoke an MR's mkey, returning it to the mkey cache
 * when possible, otherwise destroying it.
 *
 * Returns 0 on success or the (non-zero) status of destroy_mkey().
 *
 * For ODP MRs the umem_odp->umem_mutex is held across the whole revoke so
 * that clearing umem_odp->private below is ordered against readers of that
 * pointer (the invalidation path reads umem_odp->private and bails out when
 * it is NULL — presumably the race this lock closes; see the
 * mlx5_ib_invalidate_range() hunk in this change).
 */
static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
	bool is_odp = is_odp_mr(mr);
	int ret = 0;

	/* Serialize against concurrent ODP invalidation (see header comment). */
	if (is_odp)
		mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);

	/*
	 * Cacheable mkey: if the UMR revoke succeeds and the mkey can be
	 * stashed back into the cache, skip the destroy — the hardware mkey
	 * stays alive for reuse. Either call failing falls through to the
	 * destroy path below.
	 */
	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
		goto out;

	/*
	 * Not returned to the cache: drop this mkey's reference on its cache
	 * entry (in_use is protected by the entry's mkeys xarray lock) and
	 * detach it before destroying the mkey outright.
	 */
	if (ent) {
		xa_lock_irq(&ent->mkeys);
		ent->in_use--;
		mr->mmkey.cache_ent = NULL;
		xa_unlock_irq(&ent->mkeys);
	}
	ret = destroy_mkey(dev, mr);
out:
	if (is_odp) {
		/*
		 * Only on successful revoke/destroy: detach the MR from the
		 * umem so later invalidations observe NULL and return early.
		 */
		if (!ret)
			to_ib_umem_odp(mr->umem)->private = NULL;
		mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
	}

	return ret;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1915,16 +1948,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
	}

	/* Stop DMA */
	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
		if (mlx5r_umr_revoke_mr(mr) ||
		    cache_ent_find_and_store(dev, mr))
			mr->mmkey.cache_ent = NULL;

	if (!mr->mmkey.cache_ent) {
		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
	rc = mlx5_revoke_mr(mr);
	if (rc)
		return rc;
	}

	if (mr->umem) {
		bool is_odp = is_odp_mr(mr);
+2 −0
Original line number Diff line number Diff line
@@ -250,6 +250,8 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
	if (!umem_odp->npages)
		goto out;
	mr = umem_odp->private;
	if (!mr)
		goto out;

	start = max_t(u64, ib_umem_start(umem_odp), range->start);
	end = min_t(u64, ib_umem_end(umem_odp), range->end);