Unverified Commit 162d1b0b authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!837 Backport bugfixes for RDMA/hns

Merge Pull Request from: @stinft 
 
#I76PY9 
#I76PUJ 
#I76PRT  
 
Link: https://gitee.com/openeuler/kernel/pulls/837

 

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parents edb5d824 62df4e45
Loading
Loading
Loading
Loading
+17 −8
Original line number Diff line number Diff line
@@ -4920,11 +4920,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
	mtu = ib_mtu_enum_to_int(ib_mtu);
	if (WARN_ON(mtu <= 0))
		return -EINVAL;
#define MAX_LP_MSG_LEN 16384
	/* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
	lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
	if (WARN_ON(lp_pktn_ini >= 0xF))
		return -EINVAL;
#define MIN_LP_MSG_LEN 1024
	/* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
	lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);

	if (attr_mask & IB_QP_PATH_MTU) {
		hr_reg_write(context, QPC_MTU, ib_mtu);
@@ -5405,7 +5403,6 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
{
#define QP_ACK_TIMEOUT_MAX_HIP08 20
#define QP_ACK_TIMEOUT_OFFSET 10
#define QP_ACK_TIMEOUT_MAX 31

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
@@ -5414,7 +5411,7 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
				   "Local ACK timeout shall be 0 to 20.\n");
			return false;
		}
		*timeout += QP_ACK_TIMEOUT_OFFSET;
		*timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
	} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
		if (*timeout > QP_ACK_TIMEOUT_MAX) {
			ibdev_warn(&hr_dev->ib_dev,
@@ -5744,6 +5741,18 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
	return ret;
}

static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
			      struct hns_roce_v2_qp_context *context)
{
	u8 timeout;

	timeout = (u8)hr_reg_read(context, QPC_AT);
	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;

	return timeout;
}

static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
@@ -5821,7 +5830,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
	qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);

	qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
	qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
	qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context);
	qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
	qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);

+2 −0
Original line number Diff line number Diff line
@@ -43,6 +43,8 @@
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS		0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS		0x1000000

#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08    10

#define HNS_ROCE_V3_SCCC_SZ			64
#define HNS_ROCE_V3_GMV_ENTRY_SZ		32

+43 −0
Original line number Diff line number Diff line
@@ -34,6 +34,7 @@
#include <linux/vmalloc.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem.h>
#include <linux/bitops.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
@@ -1070,6 +1071,44 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
	return 0;
}

/*
 * Number of buffer pages addressed by one level-1 BA entry:
 * ba_per_bt raised to the power (hopnum - 1).  Callers only invoke
 * this for regions with hopnum >= 1.
 */
static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
{
	u64 pages = 1;
	unsigned int hop;

	for (hop = 1; hop < hopnum; hop++)
		pages *= ba_per_bt;

	return pages;
}

/*
 * Pick the best page size for the multi-hop BA (base address) table.
 *
 * Starting from @pg_shift, walk the set bits of the device's supported
 * page-size bitmap in ascending order and return the first page shift
 * for which every region's level-1 BA entries fit into a single BA
 * table page (ba_num <= ba_per_bt).  Returns 0 when no supported page
 * size is large enough.
 */
static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
				      struct hns_roce_mtr *mtr,
				      unsigned int pg_shift)
{
	unsigned long cap = hr_dev->caps.page_size_cap;
	struct hns_roce_buf_region *re;
	unsigned int pgs_per_l1ba;
	unsigned int ba_per_bt;
	unsigned int ba_num;
	int i;

	/* Iterate only over page shifts the hardware advertises,
	 * from @pg_shift upwards.
	 */
	for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
		/* Defensive re-check; for_each_set_bit_from() should
		 * already yield only set bits of @cap.
		 */
		if (!(BIT(pg_shift) & cap))
			continue;

		/* How many BA entries one BT page of this size holds. */
		ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
		ba_num = 0;
		for (i = 0; i < mtr->hem_cfg.region_count; i++) {
			re = &mtr->hem_cfg.region[i];
			/* hopnum == 0 regions are addressed directly
			 * and consume no BA table entries.
			 */
			if (re->hopnum == 0)
				continue;

			/* NOTE(review): cal_pages_per_l1ba() returns u64
			 * but pgs_per_l1ba is unsigned int — assumes the
			 * result always fits; confirm for large hopnum.
			 */
			pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
			ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
		}

		/* All level-1 BAs fit in one BT page: this size works. */
		if (ba_num <= ba_per_bt)
			return pg_shift;
	}

	return 0;
}

static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			 unsigned int ba_page_shift)
{
@@ -1078,6 +1117,10 @@ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,

	hns_roce_hem_list_init(&mtr->hem_list);
	if (!cfg->is_direct) {
		ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
		if (!ba_page_shift)
			return -ERANGE;

		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
						cfg->region, cfg->region_count,
						ba_page_shift);