Unverified Commit 81cc23e7 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4913 RDMA/hns: Support SCC parameter configuration and reporting of the...

!4913 RDMA/hns: Support SCC parameter configuration and reporting of the down/up event of the HNS RoCE network port

Merge Pull Request from: @stinft 
 
Chengchang Tang (1):
RDMA/hns: Support congestion control algorithm parameter configuration
Lang Cheng (2):
RDMA/hns: Deliver net device event to ofed
RDMA/hns: Add support for sending port down events fast
#I95PFR
#I95PFV 
 
Link: https://gitee.com/openeuler/kernel/pulls/4913

 

Reviewed-by: default avatarChengchang Tang <tangchengchang@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents dab8c5b9 ce5727c3
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -8,7 +8,7 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
	hns_roce_debugfs.o
	hns_roce_debugfs.o hns_roce_sysfs.o

ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
+39 −0
Original line number Diff line number Diff line
@@ -646,6 +646,7 @@ struct hns_roce_ib_iboe {
	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;
	u8			phy_port[HNS_ROCE_MAX_PORTS];
	enum ib_port_state	port_state[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_ceqe {
@@ -710,6 +711,14 @@ struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;
};

/*
 * Congestion control (SCC) algorithms supported by the HW.
 * NOTE: the numeric order must stay in sync with the scc_opcode[] command
 * table in hns_roce_hw_v2.c, which is indexed by these values.
 * HNS_ROCE_SCC_ALGO_TOTAL is the algorithm count, not a real algorithm.
 */
enum hns_roce_scc_algo {
	HNS_ROCE_SCC_ALGO_DCQCN = 0,
	HNS_ROCE_SCC_ALGO_LDCP,
	HNS_ROCE_SCC_ALGO_HC3,
	HNS_ROCE_SCC_ALGO_DIP,
	HNS_ROCE_SCC_ALGO_TOTAL,
};

enum cong_type {
	CONG_TYPE_DCQCN,
	CONG_TYPE_LDCP,
@@ -954,6 +963,20 @@ struct hns_roce_hw {
				u64 *stats, u32 port, int *hw_counters);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
	int (*config_scc_param)(struct hns_roce_dev *hr_dev,
				enum hns_roce_scc_algo algo);
	int (*query_scc_param)(struct hns_roce_dev *hr_dev,
			       enum hns_roce_scc_algo alog);
};

/* Number of 32-bit words in one SCC parameter set exchanged with FW. */
#define HNS_ROCE_SCC_PARAM_SIZE 4
struct hns_roce_scc_param {
	__le32 param[HNS_ROCE_SCC_PARAM_SIZE]; /* raw FW parameter words */
	u32 lifespan;			/* cfg delay-work interval knob */
	unsigned long timestamp;	/* presumably jiffies of last update — TODO confirm at the writer */
	enum hns_roce_scc_algo algo_type;	/* which algorithm this slot holds */
	struct delayed_work scc_cfg_dwork;	/* deferred FW configuration work */
	struct hns_roce_dev *hr_dev;	/* back-pointer for the work handler */
};

struct hns_roce_dev {
@@ -1019,6 +1042,7 @@ struct hns_roce_dev {
	u64 dwqe_page;
	struct hns_roce_dev_debugfs dbgfs;
	atomic64_t *dfx_cnt;
	struct hns_roce_scc_param *scc_param;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1154,6 +1178,19 @@ static inline u8 get_tclass(const struct ib_global_route *grh)
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}

/* Convert a zero-based physical port index into a one-based IB port number. */
static inline u8 to_rdma_port_num(u8 phy_port_num)
{
	return 1 + phy_port_num;
}

/*
 * Derive the IB port state from a netdev: ACTIVE only when the device is
 * both administratively up and has link carrier, otherwise DOWN.
 */
static inline enum ib_port_state get_port_state(struct net_device *net_dev)
{
	if (netif_running(net_dev) && netif_carrier_ok(net_dev))
		return IB_PORT_ACTIVE;

	return IB_PORT_DOWN;
}

extern const struct attribute_group *hns_attr_port_groups[];

void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);

@@ -1294,4 +1331,6 @@ struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type);
void hns_roce_register_sysfs(struct hns_roce_dev *hr_dev);
void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev);
#endif /* _HNS_ROCE_DEVICE_H */
+97 −0
Original line number Diff line number Diff line
@@ -6733,6 +6733,67 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
	kfree(eq_table->eq);
}

static const enum hns_roce_opcode_type scc_opcode[] = {
	HNS_ROCE_OPC_CFG_DCQCN_PARAM,
	HNS_ROCE_OPC_CFG_LDCP_PARAM,
	HNS_ROCE_OPC_CFG_HC3_PARAM,
	HNS_ROCE_OPC_CFG_DIP_PARAM,
};

static int hns_roce_v2_config_scc_param(struct hns_roce_dev *hr_dev,
					enum hns_roce_scc_algo algo)
{
	struct hns_roce_scc_param *scc_param;
	struct hns_roce_cmq_desc desc;
	int ret;

	if (algo >= HNS_ROCE_SCC_ALGO_TOTAL) {
		ibdev_err_ratelimited(&hr_dev->ib_dev, "invalid SCC algo.\n");
		return -EINVAL;
	}

	hns_roce_cmq_setup_basic_desc(&desc, scc_opcode[algo], false);
	scc_param = &hr_dev->scc_param[algo];
	memcpy(&desc.data, scc_param, sizeof(scc_param->param));

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		ibdev_err_ratelimited(&hr_dev->ib_dev,
			"failed to configure scc param, opcode: 0x%x, ret = %d.\n",
			le16_to_cpu(desc.opcode), ret);
	return ret;
}

/*
 * Read the current parameter set for @algo back from firmware into
 * hr_dev->scc_param[algo].  Only supported on HIP09+ PFs.
 */
static int hns_roce_v2_query_scc_param(struct hns_roce_dev *hr_dev,
				       enum hns_roce_scc_algo algo)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_scc_param *scc_param;
	struct hns_roce_cmq_desc desc;
	int ret;

	/* SCC query commands exist only on HIP09+ physical functions. */
	if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 || hr_dev->is_vf)
		return -EOPNOTSUPP;

	if (algo >= HNS_ROCE_SCC_ALGO_TOTAL) {
		ibdev_err_ratelimited(ibdev, "invalid SCC algo.\n");
		return -EINVAL;
	}

	hns_roce_cmq_setup_basic_desc(&desc, scc_opcode[algo], true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		ibdev_err_ratelimited(ibdev,
			"failed to query scc param, opcode: 0x%x, ret = %d.\n",
			le16_to_cpu(desc.opcode), ret);
		return ret;
	}

	/* Cache only the raw param[] words; host-side fields stay untouched. */
	scc_param = &hr_dev->scc_param[algo];
	memcpy(scc_param, &desc.data, sizeof(scc_param->param));

	return 0;
}

static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
@@ -6782,6 +6843,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
	.query_hw_counter = hns_roce_hw_v2_query_counter,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
	.config_scc_param = hns_roce_v2_config_scc_param,
	.query_scc_param = hns_roce_v2_query_scc_param,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
@@ -7047,9 +7110,43 @@ static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
	return ret;
}

/*
 * hnae3 link-status callback: report a port-down IB event as soon as the
 * NIC notices loss of carrier, without waiting for the netdev notifier.
 * Link-up is deliberately ignored here; the NETDEV_UP/CHANGE notifier
 * path handles the transition back to ACTIVE.
 */
static void hns_roce_hw_v2_link_status_change(struct hnae3_handle *handle,
					      bool linkup)
{
	struct net_device *netdev = handle->rinfo.netdev;
	struct hns_roce_dev *hr_dev = handle->priv;
	struct ib_event event;
	unsigned long flags;
	u8 phy_port;

	/* Only act on link-down, and only once the RoCE device exists. */
	if (linkup || !hr_dev)
		return;

	/* Find which RoCE port is backed by this netdev. */
	for (phy_port = 0; phy_port < hr_dev->caps.num_ports; phy_port++)
		if (netdev == hr_dev->iboe.netdevs[phy_port])
			break;

	if (phy_port == hr_dev->caps.num_ports)
		return;

	/*
	 * Update the cached port state under iboe.lock; bail out if it is
	 * already DOWN so the event is dispatched at most once per drop
	 * (the netdev notifier path uses the same state to dedupe).
	 */
	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
	if (hr_dev->iboe.port_state[phy_port] == IB_PORT_DOWN) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		return;
	}
	hr_dev->iboe.port_state[phy_port] = IB_PORT_DOWN;
	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	/* Notify IB consumers; port numbers are 1-based on the IB side. */
	event.device = &hr_dev->ib_dev;
	event.element.port_num = to_rdma_port_num(phy_port);
	event.event = IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.link_status_change = hns_roce_hw_v2_link_status_change,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

+132 −0
Original line number Diff line number Diff line
@@ -195,6 +195,10 @@ enum {
/* CMQ command */
enum hns_roce_opcode_type {
	HNS_QUERY_FW_VER				= 0x0001,
	HNS_ROCE_OPC_CFG_DCQCN_PARAM			= 0x1A80,
	HNS_ROCE_OPC_CFG_LDCP_PARAM			= 0x1A81,
	HNS_ROCE_OPC_CFG_HC3_PARAM			= 0x1A82,
	HNS_ROCE_OPC_CFG_DIP_PARAM			= 0x1A83,
	HNS_ROCE_OPC_QUERY_HW_VER			= 0x8000,
	HNS_ROCE_OPC_CFG_GLOBAL_PARAM			= 0x8001,
	HNS_ROCE_OPC_ALLOC_PF_RES			= 0x8004,
@@ -1428,6 +1432,134 @@ struct hns_roce_wqe_atomic_seg {
	__le64          cmp_data;
};

/*
 * Byte layout of the SCC parameter blobs exchanged with firmware.
 * For each algorithm, every field is described by an offset (_OFS, in
 * bytes, chained from the previous field), a size (_SZ) and a maximum
 * accepted value (_MAX).  The lifespan knob sits just past the 16-byte
 * (4 x u32) param[] area and is host-side only.
 */

/* DCQCN parameter layout. */
#define HNS_ROCE_DCQCN_AI_OFS 0
#define HNS_ROCE_DCQCN_AI_SZ sizeof(u16)
#define HNS_ROCE_DCQCN_AI_MAX ((u16)(~0U))
#define HNS_ROCE_DCQCN_F_OFS (HNS_ROCE_DCQCN_AI_OFS + HNS_ROCE_DCQCN_AI_SZ)
#define HNS_ROCE_DCQCN_F_SZ sizeof(u8)
#define HNS_ROCE_DCQCN_F_MAX ((u8)(~0U))
#define HNS_ROCE_DCQCN_TKP_OFS (HNS_ROCE_DCQCN_F_OFS + HNS_ROCE_DCQCN_F_SZ)
#define HNS_ROCE_DCQCN_TKP_SZ sizeof(u8)
#define HNS_ROCE_DCQCN_TKP_MAX 15
#define HNS_ROCE_DCQCN_TMP_OFS (HNS_ROCE_DCQCN_TKP_OFS + HNS_ROCE_DCQCN_TKP_SZ)
#define HNS_ROCE_DCQCN_TMP_SZ sizeof(u16)
#define HNS_ROCE_DCQCN_TMP_MAX 15
#define HNS_ROCE_DCQCN_ALP_OFS (HNS_ROCE_DCQCN_TMP_OFS + HNS_ROCE_DCQCN_TMP_SZ)
#define HNS_ROCE_DCQCN_ALP_SZ sizeof(u16)
#define HNS_ROCE_DCQCN_ALP_MAX ((u16)(~0U))
#define HNS_ROCE_DCQCN_MAX_SPEED_OFS (HNS_ROCE_DCQCN_ALP_OFS + \
					HNS_ROCE_DCQCN_ALP_SZ)
#define HNS_ROCE_DCQCN_MAX_SPEED_SZ sizeof(u32)
#define HNS_ROCE_DCQCN_MAX_SPEED_MAX ((u32)(~0U))
#define HNS_ROCE_DCQCN_G_OFS (HNS_ROCE_DCQCN_MAX_SPEED_OFS + \
					HNS_ROCE_DCQCN_MAX_SPEED_SZ)
#define HNS_ROCE_DCQCN_G_SZ sizeof(u8)
#define HNS_ROCE_DCQCN_G_MAX 15
#define HNS_ROCE_DCQCN_AL_OFS (HNS_ROCE_DCQCN_G_OFS + HNS_ROCE_DCQCN_G_SZ)
#define HNS_ROCE_DCQCN_AL_SZ sizeof(u8)
#define HNS_ROCE_DCQCN_AL_MAX ((u8)(~0U))
#define HNS_ROCE_DCQCN_CNP_TIME_OFS (HNS_ROCE_DCQCN_AL_OFS + \
					HNS_ROCE_DCQCN_AL_SZ)
#define HNS_ROCE_DCQCN_CNP_TIME_SZ sizeof(u8)
#define HNS_ROCE_DCQCN_CNP_TIME_MAX ((u8)(~0U))
#define HNS_ROCE_DCQCN_ASHIFT_OFS (HNS_ROCE_DCQCN_CNP_TIME_OFS + \
					HNS_ROCE_DCQCN_CNP_TIME_SZ)
#define HNS_ROCE_DCQCN_ASHIFT_SZ sizeof(u8)
#define HNS_ROCE_DCQCN_ASHIFT_MAX 15
#define HNS_ROCE_DCQCN_LIFESPAN_OFS (HNS_ROCE_DCQCN_ASHIFT_OFS + \
					HNS_ROCE_DCQCN_ASHIFT_SZ)
#define HNS_ROCE_DCQCN_LIFESPAN_SZ sizeof(u32)
#define HNS_ROCE_DCQCN_LIFESPAN_MAX 1000

/* LDCP parameter layout. */
#define HNS_ROCE_LDCP_CWD0_OFS 0
#define HNS_ROCE_LDCP_CWD0_SZ sizeof(u32)
#define HNS_ROCE_LDCP_CWD0_MAX ((u32)(~0U))
#define HNS_ROCE_LDCP_ALPHA_OFS (HNS_ROCE_LDCP_CWD0_OFS + HNS_ROCE_LDCP_CWD0_SZ)
#define HNS_ROCE_LDCP_ALPHA_SZ sizeof(u8)
#define HNS_ROCE_LDCP_ALPHA_MAX ((u8)(~0U))
#define HNS_ROCE_LDCP_GAMMA_OFS (HNS_ROCE_LDCP_ALPHA_OFS + \
					HNS_ROCE_LDCP_ALPHA_SZ)
#define HNS_ROCE_LDCP_GAMMA_SZ sizeof(u8)
#define HNS_ROCE_LDCP_GAMMA_MAX ((u8)(~0U))
#define HNS_ROCE_LDCP_BETA_OFS (HNS_ROCE_LDCP_GAMMA_OFS + \
					HNS_ROCE_LDCP_GAMMA_SZ)
#define HNS_ROCE_LDCP_BETA_SZ sizeof(u8)
#define HNS_ROCE_LDCP_BETA_MAX ((u8)(~0U))
#define HNS_ROCE_LDCP_ETA_OFS (HNS_ROCE_LDCP_BETA_OFS + HNS_ROCE_LDCP_BETA_SZ)
#define HNS_ROCE_LDCP_ETA_SZ sizeof(u8)
#define HNS_ROCE_LDCP_ETA_MAX ((u8)(~0U))
/* LDCP fields do not fill param[]; lifespan is pinned past the 4-word area. */
#define HNS_ROCE_LDCP_LIFESPAN_OFS (4 * sizeof(u32))
#define HNS_ROCE_LDCP_LIFESPAN_SZ sizeof(u32)
#define HNS_ROCE_LDCP_LIFESPAN_MAX 1000

/* HC3 parameter layout. */
#define HNS_ROCE_HC3_INITIAL_WINDOW_OFS 0
#define HNS_ROCE_HC3_INITIAL_WINDOW_SZ sizeof(u32)
#define HNS_ROCE_HC3_INITIAL_WINDOW_MAX ((u32)(~0U))
#define HNS_ROCE_HC3_BANDWIDTH_OFS (HNS_ROCE_HC3_INITIAL_WINDOW_OFS + \
					HNS_ROCE_HC3_INITIAL_WINDOW_SZ)
#define HNS_ROCE_HC3_BANDWIDTH_SZ sizeof(u32)
#define HNS_ROCE_HC3_BANDWIDTH_MAX ((u32)(~0U))
#define HNS_ROCE_HC3_QLEN_SHIFT_OFS (HNS_ROCE_HC3_BANDWIDTH_OFS + \
					HNS_ROCE_HC3_BANDWIDTH_SZ)
#define HNS_ROCE_HC3_QLEN_SHIFT_SZ sizeof(u8)
#define HNS_ROCE_HC3_QLEN_SHIFT_MAX ((u8)(~0U))
#define HNS_ROCE_HC3_PORT_USAGE_SHIFT_OFS (HNS_ROCE_HC3_QLEN_SHIFT_OFS + \
						HNS_ROCE_HC3_QLEN_SHIFT_SZ)
#define HNS_ROCE_HC3_PORT_USAGE_SHIFT_SZ sizeof(u8)
#define HNS_ROCE_HC3_PORT_USAGE_SHIFT_MAX ((u8)(~0U))
#define HNS_ROCE_HC3_OVER_PERIOD_OFS (HNS_ROCE_HC3_PORT_USAGE_SHIFT_OFS + \
					HNS_ROCE_HC3_PORT_USAGE_SHIFT_SZ)
#define HNS_ROCE_HC3_OVER_PERIOD_SZ sizeof(u8)
#define HNS_ROCE_HC3_OVER_PERIOD_MAX ((u8)(~0U))
#define HNS_ROCE_HC3_MAX_STAGE_OFS (HNS_ROCE_HC3_OVER_PERIOD_OFS + \
					HNS_ROCE_HC3_OVER_PERIOD_SZ)
#define HNS_ROCE_HC3_MAX_STAGE_SZ sizeof(u8)
#define HNS_ROCE_HC3_MAX_STAGE_MAX ((u8)(~0U))
#define HNS_ROCE_HC3_GAMMA_SHIFT_OFS (HNS_ROCE_HC3_MAX_STAGE_OFS + \
					HNS_ROCE_HC3_MAX_STAGE_SZ)
#define HNS_ROCE_HC3_GAMMA_SHIFT_SZ sizeof(u8)
#define HNS_ROCE_HC3_GAMMA_SHIFT_MAX 15
/* HC3 fields do not fill param[]; lifespan is pinned past the 4-word area. */
#define HNS_ROCE_HC3_LIFESPAN_OFS (4 * sizeof(u32))
#define HNS_ROCE_HC3_LIFESPAN_SZ sizeof(u32)
#define HNS_ROCE_HC3_LIFESPAN_MAX 1000

/* DIP parameter layout (fields mirror DCQCN). */
#define HNS_ROCE_DIP_AI_OFS 0
#define HNS_ROCE_DIP_AI_SZ sizeof(u16)
#define HNS_ROCE_DIP_AI_MAX ((u16)(~0U))
#define HNS_ROCE_DIP_F_OFS (HNS_ROCE_DIP_AI_OFS + HNS_ROCE_DIP_AI_SZ)
#define HNS_ROCE_DIP_F_SZ sizeof(u8)
#define HNS_ROCE_DIP_F_MAX ((u8)(~0U))
#define HNS_ROCE_DIP_TKP_OFS (HNS_ROCE_DIP_F_OFS + HNS_ROCE_DIP_F_SZ)
#define HNS_ROCE_DIP_TKP_SZ sizeof(u8)
#define HNS_ROCE_DIP_TKP_MAX 15
#define HNS_ROCE_DIP_TMP_OFS (HNS_ROCE_DIP_TKP_OFS + HNS_ROCE_DIP_TKP_SZ)
#define HNS_ROCE_DIP_TMP_SZ sizeof(u16)
#define HNS_ROCE_DIP_TMP_MAX 15
#define HNS_ROCE_DIP_ALP_OFS (HNS_ROCE_DIP_TMP_OFS + HNS_ROCE_DIP_TMP_SZ)
#define HNS_ROCE_DIP_ALP_SZ sizeof(u16)
#define HNS_ROCE_DIP_ALP_MAX ((u16)(~0U))
#define HNS_ROCE_DIP_MAX_SPEED_OFS (HNS_ROCE_DIP_ALP_OFS + HNS_ROCE_DIP_ALP_SZ)
#define HNS_ROCE_DIP_MAX_SPEED_SZ sizeof(u32)
#define HNS_ROCE_DIP_MAX_SPEED_MAX ((u32)(~0U))
#define HNS_ROCE_DIP_G_OFS (HNS_ROCE_DIP_MAX_SPEED_OFS + \
				HNS_ROCE_DIP_MAX_SPEED_SZ)
#define HNS_ROCE_DIP_G_SZ sizeof(u8)
#define HNS_ROCE_DIP_G_MAX 15
#define HNS_ROCE_DIP_AL_OFS (HNS_ROCE_DIP_G_OFS + HNS_ROCE_DIP_G_SZ)
#define HNS_ROCE_DIP_AL_SZ sizeof(u8)
#define HNS_ROCE_DIP_AL_MAX ((u8)(~0U))
#define HNS_ROCE_DIP_CNP_TIME_OFS (HNS_ROCE_DIP_AL_OFS + HNS_ROCE_DIP_AL_SZ)
#define HNS_ROCE_DIP_CNP_TIME_SZ sizeof(u8)
#define HNS_ROCE_DIP_CNP_TIME_MAX ((u8)(~0U))
#define HNS_ROCE_DIP_ASHIFT_OFS (HNS_ROCE_DIP_CNP_TIME_OFS + \
					HNS_ROCE_DIP_CNP_TIME_SZ)
#define HNS_ROCE_DIP_ASHIFT_SZ sizeof(u8)
#define HNS_ROCE_DIP_ASHIFT_MAX 15
#define HNS_ROCE_DIP_LIFESPAN_OFS (HNS_ROCE_DIP_ASHIFT_OFS + \
					HNS_ROCE_DIP_ASHIFT_SZ)
#define HNS_ROCE_DIP_LIFESPAN_SZ sizeof(u32)
#define HNS_ROCE_DIP_LIFESPAN_MAX 1000

struct hns_roce_sccc_clr {
	__le32 qpn;
	__le32 rsv[5];
+35 −11
Original line number Diff line number Diff line
@@ -89,10 +89,13 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
}

static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
			   unsigned long event)
			   unsigned long dev_event)
{
	struct device *dev = hr_dev->dev;
	enum ib_port_state port_state;
	struct net_device *netdev;
	struct ib_event event;
	unsigned long flags;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
@@ -101,20 +104,38 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	switch (dev_event) {
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		if (ret)
			return ret;
		fallthrough;
	case NETDEV_DOWN:
		/*
		 * In v1 engine, only support all ports closed together.
		 */
		port_state = get_port_state(netdev);

		spin_lock_irqsave(&hr_dev->iboe.lock, flags);
		if (hr_dev->iboe.port_state[port] == port_state) {
			spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
			return NOTIFY_DONE;
		}
		hr_dev->iboe.port_state[port] = port_state;
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

		event.device = &hr_dev->ib_dev;
		event.event = (port_state == IB_PORT_ACTIVE) ?
			      IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		event.element.port_num = to_rdma_port_num(port);
		ib_dispatch_event(&event);
		break;
	case NETDEV_UNREGISTER:
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(dev_event));
		break;
	}

@@ -151,6 +172,8 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		hr_dev->iboe.port_state[i] = IB_PORT_DOWN;

		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
@@ -248,9 +271,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
			       IB_PORT_ACTIVE :
			       IB_PORT_DOWN;
	props->state = get_port_state(net_dev);
	props->phys_state = props->state == IB_PORT_ACTIVE ?
				    IB_PORT_PHYS_STATE_LINK_UP :
				    IB_PORT_PHYS_STATE_DISABLED;
@@ -633,6 +654,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,
	.port_groups = hns_attr_port_groups,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
@@ -1106,6 +1128,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
	if (ret)
		goto error_failed_register_device;

	hns_roce_register_sysfs(hr_dev);
	hns_roce_register_debugfs(hr_dev);

	return 0;
@@ -1140,6 +1163,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)

void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_sysfs(hr_dev);
	hns_roce_unregister_debugfs(hr_dev);
	hns_roce_unregister_device(hr_dev);

Loading