Unverified Commit ab91812d authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4652 RDMA/hns: Support SCC context query and DSCP configuration.

Merge Pull Request from: @stinft 
 
Yixing Liu (1):
   RDMA/hns: Support DSCP of userspace
 wenglianfa (1):
   RDMA/hns: Append SCC context to the raw dump of QP Resource

https://gitee.com/openeuler/kernel/issues/I92J5Q
https://gitee.com/openeuler/kernel/issues/I8B8HH 
 
Link: https://gitee.com/openeuler/kernel/pulls/4652

 

Reviewed-by: default avatarChengchang Tang <tangchengchang@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents 9a3e1afa d0acb51b
Loading
Loading
Loading
Loading
+19 −2
Original line number Diff line number Diff line
@@ -59,8 +59,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
	struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
	struct hns_roce_ib_create_ah_resp resp = {};
	struct hns_roce_ah *ah = to_hr_ah(ibah);
	int ret = 0;
	u8 priority = 0;
	u8 tc_mode = 0;
	u32 max_sl;
	int ret;

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
		return -EOPNOTSUPP;
@@ -76,7 +78,20 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
	ah->av.udp_sport = get_ah_udp_sport(ah_attr);
	ah->av.tclass = get_tclass(grh);

	ret = hr_dev->hw->get_dscp(hr_dev, get_tclass(grh), &tc_mode,
				   &priority);
	if (ret == -EOPNOTSUPP)
		ret = 0;

	if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
		return ret;

	if (tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
	    grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
		ah->av.sl = priority;
	else
		ah->av.sl = rdma_ah_get_sl(ah_attr);

	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
	if (unlikely(ah->av.sl > max_sl)) {
		ibdev_err_ratelimited(&hr_dev->ib_dev,
@@ -99,6 +114,8 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
	}

	if (udata) {
		resp.priority = ah->av.sl;
		resp.tc_mode = tc_mode;
		memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
+3 −0
Original line number Diff line number Diff line
@@ -108,6 +108,9 @@ enum {
	HNS_ROCE_CMD_QUERY_CEQC		= 0x92,
	HNS_ROCE_CMD_DESTROY_CEQC	= 0x93,

	/* SCC CTX commands */
	HNS_ROCE_CMD_QUERY_SCCC		= 0xa2,

	/* SCC CTX BT commands */
	HNS_ROCE_CMD_READ_SCCC_BT0	= 0xa4,
	HNS_ROCE_CMD_WRITE_SCCC_BT0	= 0xa5,
+5 −0
Original line number Diff line number Diff line
@@ -637,6 +637,8 @@ struct hns_roce_qp {
	struct list_head	sq_node; /* all send qps are on a list */
	struct hns_user_mmap_entry *dwqe_mmap_entry;
	u32			config;
	u8			tc_mode;
	u8			priority;
};

struct hns_roce_ib_iboe {
@@ -944,6 +946,9 @@ struct hns_roce_hw {
	int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
	int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
	int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
	int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
			u8 *tc_mode, u8 *priority);
	int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
	int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer);
	int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
				u64 *stats, u32 port, int *hw_counters);
+83 −17
Original line number Diff line number Diff line
@@ -4832,6 +4832,60 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
	return 0;
}

/* Translate a DSCP value into a TC mapping mode and priority via the
 * underlying hnae3 Ethernet device.  Returns -EOPNOTSUPP when the
 * firmware/ae layer does not implement the query.
 */
int hns_roce_hw_v2_get_dscp(struct hns_roce_dev *hr_dev, u8 dscp,
			    u8 *tc_mode, u8 *priority)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->get_dscp_prio)
		return ops->get_dscp_prio(handle, dscp, tc_mode, priority);

	return -EOPNOTSUPP;
}

static int hns_roce_set_sl(struct ib_qp *ibqp,
			   const struct ib_qp_attr *attr,
			   struct hns_roce_v2_qp_context *context,
			   struct hns_roce_v2_qp_context *qpc_mask)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 sl_num;
	int ret;

	ret = hns_roce_hw_v2_get_dscp(hr_dev, get_tclass(&attr->ah_attr.grh),
				      &hr_qp->tc_mode, &hr_qp->priority);
	if (ret && ret != -EOPNOTSUPP &&
	    grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
		ibdev_err_ratelimited(ibdev,
				      "failed to get dscp, ret = %d.\n", ret);
		return ret;
	}

	if (hr_qp->tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
	    grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
		hr_qp->sl = hr_qp->priority;
	else
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);

	sl_num = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
	if (unlikely(hr_qp->sl > sl_num)) {
		ibdev_err_ratelimited(ibdev,
		   "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
		   hr_qp->sl, sl_num);
		return -EINVAL;
	}

	hr_reg_write(context, QPC_SL, hr_qp->sl);
	hr_reg_clear(qpc_mask, QPC_SL);

	return 0;
}

static int hns_roce_v2_set_path(struct ib_qp *ibqp,
				const struct ib_qp_attr *attr,
				int attr_mask,
@@ -4843,32 +4897,22 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	const struct ib_gid_attr *gid_attr = NULL;
	u8 sl = rdma_ah_get_sl(&attr->ah_attr);
	int is_roce_protocol;
	u16 vlan_id = 0xffff;
	bool is_udp = false;
	u32 max_sl;
	u8 ib_port;
	u8 hr_port;
	int ret;

	max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
	if (unlikely(sl > max_sl)) {
		ibdev_err_ratelimited(ibdev,
				      "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
				      sl, max_sl);
		return -EINVAL;
	}

	/*
	 * If free_mr_en of qp is set, it means that this qp comes from
	 * free mr. This qp will perform the loopback operation.
	 * In the loopback scenario, only sl needs to be set.
	 */
	if (hr_qp->free_mr_en) {
		hr_reg_write(context, QPC_SL, sl);
		hr_reg_write(context, QPC_SL, rdma_ah_get_sl(&attr->ah_attr));
		hr_reg_clear(qpc_mask, QPC_SL);
		hr_qp->sl = sl;
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
		return 0;
	}

@@ -4935,11 +4979,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));

	hr_qp->sl = sl;
	hr_reg_write(context, QPC_SL, hr_qp->sl);
	hr_reg_clear(qpc_mask, QPC_SL);

	return 0;
	return  hns_roce_set_sl(ibqp, attr, context, qpc_mask);
}

static bool check_qp_state(enum ib_qp_state cur_state,
@@ -5321,6 +5361,30 @@ static int hns_roce_v2_query_srqc(struct hns_roce_dev *hr_dev, u32 srqn,
	return ret;
}

/* Dump the SCC (congestion control) context of a QP into @buffer by
 * issuing a QUERY_SCCC mailbox command to the firmware.
 */
static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
				  void *buffer)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
				qpn);
	if (!ret)
		/* Firmware filled the DMA buffer; hand the raw context back. */
		memcpy(buffer, mailbox->buf,
		       sizeof(struct hns_roce_v2_scc_context));

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
			      struct hns_roce_v2_qp_context *context)
{
@@ -6712,6 +6776,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
	.query_cqc = hns_roce_v2_query_cqc,
	.query_qpc = hns_roce_v2_query_qpc,
	.query_mpt = hns_roce_v2_query_mpt,
	.get_dscp = hns_roce_hw_v2_get_dscp,
	.query_sccc = hns_roce_v2_query_sccc,
	.query_srqc = hns_roce_v2_query_srqc,
	.query_hw_counter = hns_roce_hw_v2_query_counter,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
+6 −0
Original line number Diff line number Diff line
@@ -646,6 +646,12 @@ struct hns_roce_v2_qp_context {
#define QPCEX_SQ_RQ_NOT_FORBID_EN QPCEX_FIELD_LOC(23, 23)
#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)

/* Size of the SCC context in 32-bit words (16 words = 64 bytes) —
 * presumably fixed by the HIP09 firmware layout; confirm against hw spec.
 */
#define SCC_CONTEXT_SIZE 16

/* Raw SCC (congestion control) context as returned by the
 * HNS_ROCE_CMD_QUERY_SCCC mailbox command; dumped opaquely to userspace.
 */
struct hns_roce_v2_scc_context {
	__le32 data[SCC_CONTEXT_SIZE];
};

#define	V2_QP_RWE_S 1 /* rdma write enable */
#define	V2_QP_RRE_S 2 /* rdma read enable */
#define	V2_QP_ATE_S 3 /* rdma atomic enable */
Loading