Commit 822cf785 authored by Aharon Landau's avatar Aharon Landau Committed by Jason Gunthorpe
Browse files

RDMA/nldev: Split nldev_stat_set_mode_doit out of nldev_stat_set_doit

In order to allow expansion of the set command with more set options, take
the set mode out of the main set function.

Link: https://lore.kernel.org/r/20211008122439.166063-9-markzhang@nvidia.com


Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Mark Zhang <markzhang@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 7301d0a9
Loading
Loading
Loading
Loading
+70 −46
Original line number Diff line number Diff line
@@ -1897,24 +1897,67 @@ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
	return err;
}

/*
 * Apply the RDMA_NLDEV_ATTR_STAT_MODE setting carried in @tb to @port of
 * @device, appending the resulting counter id / LQPN to the reply @msg.
 *
 * Auto mode: delegates to rdma_counter_set_auto_mode() (optional mask
 * attribute honoured).  Manual mode: binds the given LQPN either to an
 * explicit counter id or to a freshly allocated one; on a reply-fill
 * failure the binding is rolled back so no counter leaks.
 *
 * Returns 0 on success or a negative errno.
 */
static int nldev_stat_set_mode_doit(struct sk_buff *msg,
				    struct netlink_ext_ack *extack,
				    struct nlattr *tb[],
				    struct ib_device *device, u32 port)
{
	u32 mode, mask = 0, qpn, cntn = 0;
	int ret;

	/* Only QP counters can be configured for now */
	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
	if (mode == RDMA_COUNTER_MODE_AUTO) {
		/* Mask attribute is optional; absent means 0 */
		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
			mask = nla_get_u32(
				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
		return rdma_counter_set_auto_mode(device, port, mask, extack);
	}

	/* Manual mode requires the QP to operate on */
	if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
		return -EINVAL;
	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);

	/* Bind to the caller-supplied counter, or allocate a new one */
	if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
		cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
		ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
	} else {
		ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn);
	}
	if (ret)
		return ret;

	ret = 0;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
		/* Reply won't fit: undo the binding established above */
		rdma_counter_unbind_qpn(device, port, qpn, cntn);
		ret = -EMSGSIZE;
	}
	return ret;
}

static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	u32 index, port, mode, mask = 0, qpn, cntn = 0;
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	/* Currently only counter for QP is supported */
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
		return -EINVAL;

	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
@@ -1925,59 +1968,40 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
		goto err_put_device;
	}

	if (!tb[RDMA_NLDEV_ATTR_STAT_MODE]) {
		ret = -EINVAL;
		goto err_put_device;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
		goto err_put_device;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_SET),
			0, 0);

	mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
	if (mode == RDMA_COUNTER_MODE_AUTO) {
		if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
			mask = nla_get_u32(
				tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
		ret = rdma_counter_set_auto_mode(device, port, mask, extack);
		if (ret)
			goto err_msg;
	} else {
		if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
			goto err_msg;
		qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
		if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
			cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
			ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
		} else {
			ret = rdma_counter_bind_qpn_alloc(device, port,
							  qpn, &cntn);
		}
		if (ret)
			goto err_msg;

	if (fill_nldev_handle(msg, device) ||
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
		ret = -EMSGSIZE;
			goto err_fill;
		}
		goto err_free_msg;
	}

	ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port);
	if (ret)
		goto err_free_msg;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_fill:
	rdma_counter_unbind_qpn(device, port, qpn, cntn);
err_msg:
err_free_msg:
	nlmsg_free(msg);
err:
err_put_device:
	ib_device_put(device);
	return ret;
}