Unverified Commit 0cdb32a7 authored by openeuler-ci-bot, committed by Gitee

!14664 v2 CVE-2024-53224

Merge Pull Request from: @ci-robot 
 
PR sync from: Pu Lehui <pulehui@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/SADUYGMWKTAPJEZKR4THICSTXTBLBP2Y/ 
Jianbo Liu (1):
  IB/mlx5: Allocate resources just before first QP/SRQ is created

Patrisious Haddad (1):
  RDMA/mlx5: Move events notifier registration to be after device
    registration


-- 
2.34.1
 
https://gitee.com/src-openeuler/kernel/issues/IBEADY 
 
Link:https://gitee.com/openeuler/kernel/pulls/14664

 

Reviewed-by: Zhang Changzhong <zhangchangzhong@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
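
Editor's note: the first backported patch replaces the eager allocation in mlx5_ib_dev_res_init() with on-demand setup. The PD/CQ (and later the SRQs) are created only when the first QP or SRQ is created, guarded by a mutex plus a set-once pointer check so the common path stays lock-free. The following is a minimal, self-contained userspace sketch of that pattern; res_ctx, res_lazy_init and the malloc() stand-in are illustrative names only, not the driver's actual structures or API.

	/*
	 * Sketch of the "check, lock, re-check, publish last" pattern used by
	 * the patch.  Build with: cc -pthread lazy_init.c
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct res_ctx {
		pthread_mutex_t lock;	/* serializes first-time initialization */
		void *c0;		/* set once on first use, never cleared */
	};

	static int res_lazy_init(struct res_ctx *ctx)
	{
		void *cq;
		int ret = 0;

		/* Fast path: already initialized, skip the mutex entirely. */
		if (ctx->c0)
			return 0;

		pthread_mutex_lock(&ctx->lock);
		/* Re-check under the lock: another caller may have won the race. */
		if (ctx->c0)
			goto unlock;

		cq = malloc(64);	/* stands in for ib_alloc_pd()/ib_create_cq() */
		if (!cq) {
			ret = -1;
			goto unlock;
		}
		ctx->c0 = cq;		/* publish only after setup fully succeeds */

	unlock:
		pthread_mutex_unlock(&ctx->lock);
		return ret;
	}

	int main(void)
	{
		static struct res_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };

		/* Callers (think "create QP/SRQ") call init before first use. */
		if (res_lazy_init(&ctx) || res_lazy_init(&ctx))
			return 1;
		printf("resource ready: %p\n", ctx.c0);
		free(ctx.c0);
		return 0;
	}

As in the sketch, the driver's version below publishes devr->p0/devr->c0 only after both allocations succeed, under devr->cq_lock, and mlx5_ib_dev_res_srq_init() repeats the same pattern for devr->s0/devr->s1 under devr->srq_lock.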
parents aa22722c 3fe2be04
+123 −62
@@ -2796,37 +2796,72 @@ static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
	}
}

static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct ib_srq_init_attr attr;
	struct ib_device *ibdev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int port;
	struct ib_device *ibdev;
	struct ib_pd *pd;
	struct ib_cq *cq;
	int ret = 0;


	/*
	 * devr->c0 is set once, never changed until device unload.
	 * Avoid taking the mutex if initialization is already done.
	 */
	if (devr->c0)
		return 0;

	mutex_lock(&devr->cq_lock);
	if (devr->c0)
		goto unlock;

	ibdev = &dev->ib_dev;
	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%d\n", ret);
		goto unlock;
	}

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return -EOPNOTSUPP;
	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%d\n", ret);
		ib_dealloc_pd(pd);
		goto unlock;
	}

	devr->p0 = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(devr->p0))
		return PTR_ERR(devr->p0);
	devr->p0 = pd;
	devr->c0 = cq;

	devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
unlock:
	mutex_unlock(&devr->cq_lock);
	return ret;
}

	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
	if (ret)
		goto error2;
int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct ib_srq_init_attr attr;
	struct ib_srq *s0, *s1;
	int ret = 0;

	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
	/*
	 * devr->s1 is set once, never changed until device unload.
	 * Avoid taking the mutex if initialization is already done.
	 */
	if (devr->s1)
		return 0;

	mutex_lock(&devr->srq_lock);
	if (devr->s1)
		goto unlock;

	ret = mlx5_ib_dev_res_cq_init(dev);
	if (ret)
		goto error3;
		goto unlock;

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
@@ -2834,10 +2869,11 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.cq = devr->c0;

	devr->s0 = ib_create_srq(devr->p0, &attr);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto err_create;
	s0 = ib_create_srq(devr->p0, &attr);
	if (IS_ERR(s0)) {
		ret = PTR_ERR(s0);
		mlx5_ib_err(dev, "Couldn't create SRQ 0 for res init, err=%d\n", ret);
		goto unlock;
	}

	memset(&attr, 0, sizeof(attr));
@@ -2845,52 +2881,64 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;

	devr->s1 = ib_create_srq(devr->p0, &attr);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error6;
	s1 = ib_create_srq(devr->p0, &attr);
	if (IS_ERR(s1)) {
		ret = PTR_ERR(s1);
		mlx5_ib_err(dev, "Couldn't create SRQ 1 for res init, err=%d\n", ret);
		ib_destroy_srq(s0);
		goto unlock;
	}

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);
	devr->s0 = s0;
	devr->s1 = s1;

	return 0;
unlock:
	mutex_unlock(&devr->srq_lock);
	return ret;
}

error6:
	ib_destroy_srq(devr->s0);
err_create:
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
error3:
static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int ret;

	if (!MLX5_CAP_GEN(dev->mdev, xrc))
		return -EOPNOTSUPP;

	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
	if (ret)
		return ret;

	ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
	if (ret) {
		mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
error2:
	ib_destroy_cq(devr->c0);
error1:
	ib_dealloc_pd(devr->p0);
		return ret;
	}

	mutex_init(&devr->cq_lock);
	mutex_init(&devr->srq_lock);

	return 0;
}

static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int port;

	/*
	 * Make sure no change P_Key work items are still executing.
	 *
	 * At this stage, the mlx5_ib_event should be unregistered
	 * and it ensures that no new works are added.
	 */
	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);

	/* After s0/s1 init, they are not unset during the device lifetime. */
	if (devr->s1) {
		ib_destroy_srq(devr->s1);
		ib_destroy_srq(devr->s0);
	}
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
	mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
	/* After p0/c0 init, they are not unset during the device lifetime. */
	if (devr->c0) {
		ib_destroy_cq(devr->c0);
		ib_dealloc_pd(devr->p0);
	}
	mutex_destroy(&devr->cq_lock);
	mutex_destroy(&devr->srq_lock);
}

static u32 get_core_cap_flags(struct ib_device *ibdev,
			      struct mlx5_hca_vport_context *rep)
@@ -4138,6 +4186,13 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)

static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int port;

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);

	dev->mdev_events.notifier_call = mlx5_ib_event;
	mlx5_notifier_register(dev->mdev, &dev->mdev_events);

@@ -4148,8 +4203,14 @@ static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)

static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	int port;

	mlx5r_macsec_event_unregister(dev);
	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);
}

void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
@@ -4223,9 +4284,6 @@ static const struct mlx5_ib_profile pf_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_dev_res_init,
		     mlx5_ib_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_ODP,
		     mlx5_ib_odp_init_one,
		     mlx5_ib_odp_cleanup_one),
@@ -4250,6 +4308,9 @@ static const struct mlx5_ib_profile pf_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
@@ -4286,9 +4347,6 @@ const struct mlx5_ib_profile raw_eth_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_dev_res_init,
		     mlx5_ib_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_counters_init,
		     mlx5_ib_counters_cleanup),
@@ -4310,6 +4368,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
		     mlx5_ib_stage_dev_notifier_init,
		     mlx5_ib_stage_dev_notifier_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
+5 −1
@@ -820,11 +820,13 @@ struct mlx5_ib_port_resources {

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct mutex cq_lock;
	u32 xrcdn0;
	u32 xrcdn1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mutex srq_lock;
	struct mlx5_ib_port_resources ports[2];
};

@@ -952,7 +954,6 @@ enum mlx5_ib_stages {
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
@@ -961,6 +962,7 @@ enum mlx5_ib_stages {
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
@@ -1270,6 +1272,8 @@ to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
		struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
+4 −0
@@ -3247,6 +3247,10 @@ int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
	enum ib_qp_type type;
	int err;

	err = mlx5_ib_dev_res_srq_init(dev);
	if (err)
		return err;

	err = check_qp_type(dev, attr, &type);
	if (err)
		return err;
+4 −0
@@ -216,6 +216,10 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
		return -EINVAL;
	}

	err = mlx5_ib_dev_res_cq_init(dev);
	if (err)
		return err;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
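
Editor's note: the second backported patch is about ordering rather than allocation. The mdev events notifier (and the P_Key change work it schedules) is now registered only after the IB device itself is registered, and is unregistered, with pending work cancelled, before the device goes away. A compact, purely illustrative userspace sketch of that setup/teardown ordering follows; dev_ctx and the callback wiring are hypothetical, not the driver's API.

	#include <stdio.h>

	struct dev_ctx {
		int registered;
		void (*event_cb)(struct dev_ctx *d, int event);
	};

	static void pkey_change_handler(struct dev_ctx *d, int event)
	{
		/* Safe: the callback can only fire once the device is registered. */
		printf("event %d, device registered=%d\n", event, d->registered);
	}

	int main(void)
	{
		struct dev_ctx d = { 0 };

		d.registered = 1;			/* 1. register the device (IB_REG stage) */
		d.event_cb = pkey_change_handler;	/* 2. only then accept events (DEVICE_NOTIFIER stage) */

		if (d.event_cb)
			d.event_cb(&d, 42);		/* handler always sees a fully registered device */

		d.event_cb = NULL;			/* 3. stop events and flush pending work ... */
		d.registered = 0;			/* 4. ... before tearing the device down */
		return 0;
	}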