Commit 29a877d5 authored by Linus Torvalds
Pull rdma fixes from Jason Gunthorpe:
 "A mixture of small bug fixes and a small security issue:

   - WARN_ON when IPoIB is automatically moved between namespaces

   - Long standing bug where mlx5 would use the wrong page for the
     doorbell recovery memory if fork is used

   - Security fix for mlx4 that disables the timestamp feature

   - Several crashers for mlx5

   - Plug a recent mlx5 memory leak for the sig_mr"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/mlx5: Fix initializing CQ fragments buffer
  RDMA/mlx5: Delete right entry from MR signature database
  RDMA: Verify port when creating flow rule
  RDMA/mlx5: Block FDB rules when not in switchdev mode
  RDMA/mlx4: Do not map the core_clock page to user space unless enabled
  RDMA/mlx5: Use different doorbell memory for different processes
  RDMA/ipoib: Fix warning caused by destroying non-initial netns
parents cd1245d7 2ba0aa2f
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -3248,6 +3248,11 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
		goto err_free_attr;
	}

	if (!rdma_is_port_valid(uobj->context->device, cmd.flow_attr.port)) {
		err = -EINVAL;
		goto err_uobj;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
	if (!qp) {
		err = -EINVAL;
+1 −7
Original line number Diff line number Diff line
@@ -581,12 +581,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
		if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
@@ -1702,9 +1699,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (!rdma_is_port_valid(qp->device, flow_attr->port))
		return ERR_PTR(-EINVAL);

	if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
		return ERR_PTR(-EOPNOTSUPP);

+4 −5
Original line number Diff line number Diff line
@@ -849,15 +849,14 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
	ib_umem_release(cq->buf.umem);
}

static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
			     struct mlx5_ib_cq_buf *buf)
static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe(cq, i);
		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
@@ -883,7 +882,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
	if (err)
		goto err_db;

	init_cq_frag_buf(cq, &cq->buf);
	init_cq_frag_buf(&cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
@@ -1184,7 +1183,7 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
	if (err)
		goto ex;

	init_cq_frag_buf(cq, cq->resize_buf);
	init_cq_frag_buf(cq->resize_buf);

	return 0;

+6 −1
Original line number Diff line number Diff line
@@ -41,6 +41,7 @@ struct mlx5_ib_user_db_page {
	struct ib_umem	       *umem;
	unsigned long		user_virt;
	int			refcnt;
	struct mm_struct	*mm;
};

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
@@ -53,7 +54,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
	mutex_lock(&context->db_page_mutex);

	list_for_each_entry(page, &context->db_page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
		if ((current->mm == page->mm) &&
		    (page->user_virt == (virt & PAGE_MASK)))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -71,6 +73,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
		kfree(page);
		goto out;
	}
	mmgrab(current->mm);
	page->mm = current->mm;

	list_add(&page->list, &context->db_page_list);

@@ -91,6 +95,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)

	if (!--db->u.user_page->refcnt) {
		list_del(&db->u.user_page->list);
		mmdrop(db->u.user_page->mm);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}
+8 −3
Original line number Diff line number Diff line
@@ -1194,9 +1194,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
		goto free_ucmd;
	}

	if (flow_attr->port > dev->num_ports ||
	    (flow_attr->flags &
	     ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
	if (flow_attr->flags &
	    ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
		err = -EINVAL;
		goto free_ucmd;
	}
@@ -2134,6 +2133,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
	if (err)
		goto end;

	if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
	    mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
		err = -EINVAL;
		goto end;
	}

	uobj->object = obj;
	obj->mdev = dev->mdev;
	atomic_set(&obj->usecnt, 0);
Loading