Unverified Commit 6201b1cd authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4665 urma: fix some bugs of urma

Merge Pull Request from: @zhaoweibo3 
 
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I91DSN


CVE: NA

--------------------------------

This kernel patch contains the following fixes:

1. Fix the bug of udma driver reset timeout
2. Fix uvs_admin.log and tpsa.log paths
3. Fix the bug of memory leak after ubcore operates ioctl
4. Fix log loss issue

Fixes: 84e12236 ("ub: add new feature for urma")
Signed-off-by: default avatarWenChen <chenwen54@huawei.com>
Signed-off-by: default avatarJieLei <leijie31@huawei.com>
 
Link: https://gitee.com/openeuler/kernel/pulls/4665

 

Reviewed-by: default avatarChunzhi Hu <huchunzhi@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents 737db3d8 19ae2311
Loading
Loading
Loading
Loading
+30 −14
Original line number Diff line number Diff line
@@ -403,7 +403,7 @@ static int udma_query_res_jfs(struct udma_dev *udma_dev,
			jfs->jfs_id = jfs_now->jfs_id;
			jfs->state = jfs_now->state;
			jfs->depth = jfs_now->depth;
			jfs->pri = jfs_now->pri;
			jfs->priority = jfs_now->pri;
			jfs->jfc_id = jfs_now->jfc_id;
			val->len = sizeof(struct ubcore_res_jfs_val);
			return 0;
@@ -487,7 +487,7 @@ static int udma_query_res_jetty(struct udma_dev *udma_dev,
			jetty->jetty_id = jetty_now->jetty_id;
			jetty->state = jetty_now->state;
			jetty->jfs_depth = jetty_now->jfs_depth;
			jetty->pri = jetty_now->pri;
			jetty->priority = jetty_now->pri;
			jetty->jfr_id = jetty_now->jfr_id;
			jetty->send_jfc_id  = jetty_now->jfc_s_id;
			jetty->recv_jfc_id  = jetty_now->jfc_r_id;
@@ -540,7 +540,9 @@ static int udma_query_res_seg(struct udma_dev *udma_dev,
	struct ubcore_res_seg_val *seg = (struct ubcore_res_seg_val *)val->addr;
	struct udma_mpt_entry mpt_entry;
	struct seg_list *seg_now;
	union ubcore_eid eid;
	uint32_t mpt_index;
	uint32_t token_id;
	int ret, i;

	ret = udma_find_dfx_dev(udma_dev, &i);
@@ -564,22 +566,36 @@ static int udma_query_res_seg(struct udma_dev *udma_dev,
		return ret;
	}

	seg->ubva.va = udma_reg_read(&mpt_entry, MPT_VA_L) |
		       udma_reg_read(&mpt_entry, MPT_VA_H) <<
		       MPT_VA_H_SHIFT;
	seg->len = udma_reg_read(&mpt_entry, MPT_LEN_L) |
		   udma_reg_read(&mpt_entry, MPT_LEN_H) <<
		   MPT_LEN_H_SHIFT;
	seg->token_id = udma_reg_read(&mpt_entry, MPT_LKEY);
	list_for_each_entry(seg_now,
			    &g_udma_dfx_list[i].dfx->seg_list->node, node) {
		if (seg_now->key_id == seg->token_id) {
			memcpy(&seg->ubva.eid, &seg_now->eid, sizeof(union ubcore_eid));
	token_id = udma_reg_read(&mpt_entry, MPT_LKEY);
	seg->seg_cnt = 0;

	spin_lock(&g_udma_dfx_list[i].dfx->seg_list->node_lock);
	list_for_each_entry(seg_now, &g_udma_dfx_list[i].dfx->seg_list->node, node) {
		if (seg_now->key_id == token_id) {
			memcpy(&eid, &seg_now->eid, sizeof(union ubcore_eid));
			seg->seg_cnt = 1;
			break;
		}
	}
	spin_unlock(&g_udma_dfx_list[i].dfx->seg_list->node_lock);

	val->len = sizeof(struct ubcore_res_seg_val);
	if (seg->seg_cnt == 0) {
		dev_err(udma_dev->dev, "failed to query seg, token_id = %u.\n", token_id);
		return -EINVAL;
	}

	seg->seg_list = vmalloc(sizeof(struct ubcore_seg_info));
	if (!seg->seg_list)
		return -ENOMEM;

	seg->seg_list->token_id = token_id;
	seg->seg_list->len = udma_reg_read(&mpt_entry, MPT_LEN_L) |
			     udma_reg_read(&mpt_entry, MPT_LEN_H) <<
			     MPT_LEN_H_SHIFT;
	seg->seg_list->ubva.va = udma_reg_read(&mpt_entry, MPT_VA_L) |
				 udma_reg_read(&mpt_entry, MPT_VA_H) <<
				 MPT_VA_H_SHIFT;
	seg->seg_list->ubva.eid = eid;

	return 0;
}
+2 −2
Original line number Diff line number Diff line
@@ -342,7 +342,7 @@ static int udma_query_device_attr(struct ubcore_device *dev,
	struct net_device *net_dev;
	int i;

	attr->max_eid_cnt = udma_dev->caps.max_eid_cnt;
	attr->dev_cap.max_eid_cnt = udma_dev->caps.max_eid_cnt;
	attr->dev_cap.max_jfc = (1 << udma_dev->caps.num_jfc_shift);
	attr->dev_cap.max_jfs = (1 << udma_dev->caps.num_jfs_shift);
	attr->dev_cap.max_jfr = (1 << udma_dev->caps.num_jfr_shift);
@@ -361,7 +361,7 @@ static int udma_query_device_attr(struct ubcore_device *dev,
	attr->dev_cap.feature.bs.spray_en = 1;
	attr->dev_cap.max_jfs_rsge = udma_dev->caps.max_sq_sg;
	attr->dev_cap.congestion_ctrl_alg = query_congest_alg(udma_dev->caps.cong_type);
	attr->fe_cnt = udma_dev->func_num - 1;
	attr->dev_cap.max_fe_cnt = udma_dev->func_num - 1;
	attr->port_cnt = udma_dev->caps.num_ports;
	attr->tp_maintainer = true;

+122 −54
Original line number Diff line number Diff line
@@ -227,7 +227,7 @@ struct ubcore_device *ubcore_find_device(union ubcore_eid *eid, enum ubcore_tran

	mutex_lock(&g_device_mutex);
	list_for_each_entry(dev, &g_device_list, list_node) {
		for (idx = 0; idx < dev->attr.max_eid_cnt; idx++) {
		for (idx = 0; idx < dev->attr.dev_cap.max_eid_cnt; idx++) {
			if (memcmp(&dev->eid_table.eid_entries[idx].eid, eid,
				sizeof(union ubcore_eid)) == 0 && dev->transport_type == type) {
				target = dev;
@@ -303,15 +303,16 @@ int ubcore_add_upi_list(struct ubcore_device *dev, uint32_t upi)
	return 0;
}

void ubcore_destroy_upi_list(void)
void ubcore_destroy_upi_list(struct ubcore_device *dev)
{
	struct ubcore_upi_entry *entry = NULL, *next;

	mutex_lock(&g_upi_lock);
	list_for_each_entry_safe(entry, next, &g_upi_list, node) {
		if (entry != NULL) {
		if (entry != NULL && entry->dev == dev) {
			list_del(&entry->node);
			kfree(entry);
			break;
		}
	}
	mutex_unlock(&g_upi_lock);
@@ -431,17 +432,16 @@ struct ubcore_device *ubcore_find_tpf_device_by_name(char *dev_name,
struct ubcore_device *ubcore_find_tpf_device(struct ubcore_net_addr *netaddr,
	enum ubcore_transport_type type)
{
	char dev_name[UBCORE_MAX_DEV_NAME] = {0};
	struct ubcore_device *tpf_dev = NULL;

	if (netaddr == NULL)
		return ubcore_find_tpf_device_legacy();

	if (ubcore_lookup_sip_by_addr(netaddr, dev_name) < 0) {
		ubcore_log_warn("can not find tpf by net_addr:ip %pI6c", &netaddr->net_addr);
	tpf_dev = ubcore_lookup_tpf_by_sip_addr(netaddr);
	if (tpf_dev == NULL)
		return ubcore_find_tpf_device_legacy();
	}

	return ubcore_find_tpf_device_by_name(dev_name, type);
	return tpf_dev;
}

int ubcore_tpf_device_set_global_cfg(struct ubcore_set_global_cfg *cfg)
@@ -565,13 +565,13 @@ static int ubcore_create_eidtable(struct ubcore_device *dev)
	struct ubcore_eid_entry *entry_list;

	entry_list = kcalloc(1,
		dev->attr.max_eid_cnt * sizeof(struct ubcore_eid_entry), GFP_ATOMIC);
		dev->attr.dev_cap.max_eid_cnt * sizeof(struct ubcore_eid_entry), GFP_ATOMIC);
	if (entry_list == NULL)
		return -ENOMEM;

	dev->eid_table.eid_entries = entry_list;
	spin_lock_init(&dev->eid_table.lock);
	dev->eid_table.eid_cnt = dev->attr.max_eid_cnt;
	dev->eid_table.eid_cnt = dev->attr.dev_cap.max_eid_cnt;
	dev->dynamic_eid = 1;
	return 0;
}
@@ -717,6 +717,7 @@ int ubcore_query_all_device_tpf_dev_info(void)
					dev->dev_name);
				ret = -1;
			}
			ubcore_log_info("query tpf_dev %s to notify uvs", dev->dev_name);
		}
	}
	mutex_unlock(&g_device_mutex);
@@ -735,6 +736,9 @@ static int init_ubcore_device(struct ubcore_device *dev)
	if (dev->transport_type == UBCORE_TRANSPORT_UB && g_tpf == NULL && dev->attr.tp_maintainer)
		g_tpf = dev;

	if (dev->transport_type == UBCORE_TRANSPORT_UB && dev->attr.tp_maintainer)
		ubcore_sip_table_init(&dev->sip_table);

	device_initialize(&dev->dev);
	dev_set_drvdata(&dev->dev, dev);
	dev_set_name(&dev->dev, "%s", dev->dev_name);
@@ -775,6 +779,24 @@ static int init_ubcore_device(struct ubcore_device *dev)
	return 0;
}

/* Tell UVS to drop every SIP entry still registered for this device. */
static void ubcore_remove_uvs_sip_info(struct ubcore_device *dev)
{
	struct ubcore_sip_info *entry;
	uint32_t entry_cnt;
	uint32_t idx;

	mutex_lock(&dev->sip_table.lock);
	entry_cnt = ubcore_get_sip_max_cnt(&dev->sip_table);
	for (idx = 0; idx < entry_cnt; idx++) {
		entry = dev->sip_table.entry[idx];
		/* Skip empty slots; only notify while the netlink channel is up. */
		if (entry != NULL && ubcore_get_netlink_valid() == true)
			(void)ubcore_notify_uvs_del_sip(dev, entry, idx);
	}
	mutex_unlock(&dev->sip_table.lock);
}

static void uninit_ubcore_device(struct ubcore_device *dev)
{
	ubcore_put_port_netdev(dev);
@@ -783,12 +805,14 @@ static void uninit_ubcore_device(struct ubcore_device *dev)
	ubcore_destroy_eidtable(dev);

	if (!dev->attr.virtualization)
		ubcore_destroy_upi_list();
		ubcore_destroy_upi_list(dev);

	if (g_tpf == dev && dev->attr.tp_maintainer)
		g_tpf = NULL;

	if (dev->transport_type == UBCORE_TRANSPORT_UB && dev->attr.tp_maintainer) {
		ubcore_remove_uvs_sip_info(dev);
		ubcore_sip_table_uninit(&dev->sip_table);
		if (ubcore_get_netlink_valid() && ubcore_send_remove_tpf_dev_info(dev) != 0)
			ubcore_log_warn("failed to remove tpf dev info %s", dev->dev_name);
	}
@@ -1343,58 +1367,63 @@ EXPORT_SYMBOL(ubcore_query_stats);
static int ubcore_add_device_sip(struct ubcore_device *dev, struct ubcore_sip_info *sip)
{
	uint32_t index;
	int ret;

	ret = ubcore_lookup_sip_idx(sip, &index);
	if (ret == 0) {
	if (ubcore_lookup_sip_idx(&dev->sip_table, sip, &index) == 0) {
		ubcore_log_err("sip already exists\n");
		return -1;
	}
	index = ubcore_sip_idx_alloc(0);
	index = ubcore_sip_idx_alloc(&dev->sip_table);

	if (dev->ops->add_net_addr != NULL && dev->ops->add_net_addr(dev, &sip->addr, index) != 0) {
		ubcore_log_err("Failed to set net addr");
		ret = -1;
		goto free_sip_index;
	}
	/* add net_addr entry, record idx -> netaddr mapping */
	if (ubcore_add_sip_entry(sip, index) != 0) {
		ret = -1;
	if (ubcore_add_sip_entry(&dev->sip_table, sip, index) != 0)
		goto del_net_addr;
	}

	/* notify uvs add sip info */
	if (ubcore_get_netlink_valid() == true)
		(void)ubcore_notify_uvs_add_sip(dev, sip, index);
	if (ubcore_get_netlink_valid() == true && ubcore_notify_uvs_add_sip(dev, sip, index) != 0)
		goto del_sip_entry;

	return 0;

del_sip_entry:
	(void)ubcore_del_sip_entry(&dev->sip_table, index);
del_net_addr:
	if (dev->ops->delete_net_addr != NULL)
		dev->ops->delete_net_addr(dev, index);
free_sip_index:
	(void)ubcore_sip_idx_free(index);
	return ret;
	(void)ubcore_sip_idx_free(&dev->sip_table, index);
	return -1;
}

static int ubcore_del_device_sip(struct ubcore_device *dev, struct ubcore_sip_info *sip)
{
	uint32_t index;

	if (ubcore_lookup_sip_idx(sip, &index) != 0)
	if (ubcore_lookup_sip_idx(&dev->sip_table, sip, &index) != 0)
		return -1;

	(void)ubcore_del_sip_entry(index);
	(void)ubcore_del_sip_entry(&dev->sip_table, index);

	if (dev->ops->delete_net_addr != NULL && dev->ops->delete_net_addr(dev, index) != 0) {
		ubcore_log_err("Failed to delete net addr");
		(void)ubcore_add_sip_entry(sip, index);
		return -1;
		goto add_sip_entry;
	}
	/* notify uvs del sip info */
	if (ubcore_get_netlink_valid() == true)
		(void)ubcore_notify_uvs_del_sip(dev, sip, index);
	if (ubcore_get_netlink_valid() == true && ubcore_notify_uvs_del_sip(dev, sip, index) != 0)
		goto add_net_addr;

	(void)ubcore_sip_idx_free(index);
	(void)ubcore_sip_idx_free(&dev->sip_table, index);
	return 0;

add_net_addr:
	if (dev->ops->add_net_addr != NULL)
		dev->ops->add_net_addr(dev, &sip->addr, index);
add_sip_entry:
	(void)ubcore_add_sip_entry(&dev->sip_table, sip, index);
	return -1;
}

static int ubcore_update_sip(struct ubcore_sip_info *sip, bool is_add)
@@ -1438,28 +1467,6 @@ int ubcore_delete_sip(struct ubcore_sip_info *sip)
}
EXPORT_SYMBOL(ubcore_delete_sip);

/* Push every known SIP entry back to UVS (legacy global-table variant). */
void ubcore_sync_sip_table(void)
{
	struct ubcore_device *owner_dev;
	struct ubcore_sip_info *entry;
	uint32_t total;
	uint32_t idx;

	total = ubcore_get_sip_max_cnt();
	for (idx = 0; idx < total; idx++) {
		entry = ubcore_lookup_sip_info(idx);
		if (entry == NULL)
			continue;

		/* Resolve the owning TPF device by the name recorded in the entry. */
		owner_dev = ubcore_find_tpf_device_by_name(entry->dev_name,
							   UBCORE_TRANSPORT_UB);
		if (owner_dev == NULL)
			continue;

		(void)ubcore_notify_uvs_add_sip(owner_dev, entry, idx);
		ubcore_put_device(owner_dev);
	}
}

struct ubcore_eid_info *ubcore_get_eid_list(struct ubcore_device *dev, uint32_t *cnt)
{
	struct ubcore_eid_info *tmp;
@@ -1467,12 +1474,12 @@ struct ubcore_eid_info *ubcore_get_eid_list(struct ubcore_device *dev, uint32_t
	uint32_t count;
	uint32_t i;

	tmp = vmalloc(dev->attr.max_eid_cnt * sizeof(struct ubcore_eid_info));
	tmp = vmalloc(dev->attr.dev_cap.max_eid_cnt * sizeof(struct ubcore_eid_info));
	if (tmp == NULL)
		return NULL;

	spin_lock(&dev->eid_table.lock);
	for (i = 0, count = 0; i < dev->attr.max_eid_cnt; i++) {
	for (i = 0, count = 0; i < dev->attr.dev_cap.max_eid_cnt; i++) {
		if (dev->eid_table.eid_entries[i].valid == true) {
			tmp[count].eid = dev->eid_table.eid_entries[i].eid;
			tmp[count].eid_index = i;
@@ -1502,3 +1509,64 @@ void ubcore_free_eid_list(struct ubcore_eid_info *eid_list)
		vfree(eid_list);
}
EXPORT_SYMBOL(ubcore_free_eid_list);

/*
 * Re-announce every SIP entry of each TPF-maintainer device to UVS.
 * Used to resynchronize UVS state with the per-device sip tables.
 * Lock order: g_device_mutex, then each device's sip_table lock.
 */
void ubcore_sync_sip_table(void)
{
	struct ubcore_sip_table *sip_table;
	struct ubcore_device *dev = NULL;
	struct ubcore_sip_info *sip;
	uint32_t max_cnt;
	uint32_t i;

	mutex_lock(&g_device_mutex);
	list_for_each_entry(dev, &g_device_list, list_node) {
		/* list_for_each_entry never yields NULL; only the TPF check is needed. */
		if (!dev->attr.tp_maintainer)
			continue;
		ubcore_get_device(dev);
		sip_table = &dev->sip_table;
		mutex_lock(&sip_table->lock);
		max_cnt = ubcore_get_sip_max_cnt(sip_table);
		for (i = 0; i < max_cnt; i++) {
			sip = sip_table->entry[i];
			if (sip == NULL)
				continue;
			/* Only push when the netlink channel to UVS is up. */
			if (ubcore_get_netlink_valid() == true)
				(void)ubcore_notify_uvs_add_sip(dev, sip, i);
		}
		mutex_unlock(&sip_table->lock);
		ubcore_put_device(dev);
	}
	mutex_unlock(&g_device_mutex);
}

/*
 * Find the TPF-maintainer device whose SIP table holds an entry matching
 * @addr. On a hit the device's refcount is taken (ubcore_get_device) and
 * the device is returned; the caller must release it with ubcore_put_device.
 * Returns NULL when no device has a matching SIP entry.
 * Lock order: g_device_mutex, then the device's sip_table lock.
 */
struct ubcore_device *ubcore_lookup_tpf_by_sip_addr(struct ubcore_net_addr *addr)
{
	struct ubcore_device *dev = NULL;
	struct ubcore_sip_table *sip_table;
	uint32_t max_cnt;
	uint32_t i;

	mutex_lock(&g_device_mutex);
	list_for_each_entry(dev, &g_device_list, list_node) {
		/* The list cursor is never NULL; only filter on tp_maintainer. */
		if (!dev->attr.tp_maintainer)
			continue;
		sip_table = &dev->sip_table;
		mutex_lock(&sip_table->lock);
		max_cnt = ubcore_get_sip_max_cnt(sip_table);
		for (i = 0; i < max_cnt; i++) {
			/* NOTE(review): whole-struct memcmp assumes ubcore_net_addr
			 * has no uninitialized padding — confirm callers zero it. */
			if (sip_table->entry[i] != NULL &&
			    memcmp(addr, &sip_table->entry[i]->addr,
				   sizeof(struct ubcore_net_addr)) == 0) {
				ubcore_get_device(dev);
				mutex_unlock(&sip_table->lock);
				mutex_unlock(&g_device_mutex);
				return dev;
			}
		}
		mutex_unlock(&sip_table->lock);
	}
	mutex_unlock(&g_device_mutex);

	return NULL;
}
+2 −20
Original line number Diff line number Diff line
@@ -52,19 +52,6 @@ struct ubcore_jfr *ubcore_find_jfr(struct ubcore_device *dev, uint32_t jfr_id)
}
EXPORT_SYMBOL(ubcore_find_jfr);

/* Pick a completion EQ for the current CPU; 0 when the device has no CEQs. */
static uint32_t ubcore_get_ceqn(struct ubcore_device *dev)
{
	uint32_t eqn;
	int cur_cpu;

	if (!(dev->attr.dev_cap.ceq_cnt > 0))
		return 0;

	/* get_cpu() disables preemption so the CPU id stays valid for the modulo. */
	cur_cpu = get_cpu();
	eqn = (uint32_t)(cur_cpu % dev->attr.dev_cap.ceq_cnt);
	put_cpu();
	return eqn;
}

static int check_and_fill_jfc_attr(struct ubcore_jfc_cfg *cfg, struct ubcore_jfc_cfg *user)
{
	if (cfg->depth < user->depth)
@@ -81,15 +68,11 @@ struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, struct ubcore_jf
	struct ubcore_udata *udata)
{
	struct ubcore_jfc *jfc;
	uint32_t ceqn;

	if (dev == NULL || cfg == NULL || dev->ops->create_jfc == NULL ||
		dev->ops->destroy_jfc == NULL)
		return NULL;

	ceqn = ubcore_get_ceqn(dev);

	((struct ubcore_jfc_cfg *)cfg)->ceqn = ceqn;
	jfc = dev->ops->create_jfc(dev, cfg, udata);
	if (jfc == NULL) {
		ubcore_log_err("failed to create jfc.\n");
@@ -101,7 +84,6 @@ struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, struct ubcore_jf
		ubcore_log_err("jfc cfg is not qualified.\n");
		return NULL;
	}
	jfc->jfc_cfg.ceqn = ceqn;
	jfc->jfce_handler = jfce_handler;
	jfc->jfae_handler = jfae_handler;
	jfc->ub_dev = dev;
@@ -513,7 +495,6 @@ struct ubcore_tjetty *ubcore_import_jfr(struct ubcore_device *dev,
	tjfr->cfg = *cfg;
	tjfr->ub_dev = dev;
	tjfr->uctx = ubcore_get_uctx(udata);
	tjfr->type = UBCORE_JFR;
	atomic_set(&tjfr->use_cnt, 0);
	mutex_init(&tjfr->lock);

@@ -631,6 +612,8 @@ static int check_jetty_cfg_with_jetty_grp(struct ubcore_jetty_cfg *cfg)

	if (cfg->flag.bs.share_jfr == 1 && (cfg->jfr == NULL ||
		cfg->token_value.token != cfg->jfr->jfr_cfg.token_value.token ||
		cfg->jetty_grp->jetty_grp_cfg.flag.bs.token_policy !=
		cfg->jfr->jfr_cfg.flag.bs.token_policy ||
		cfg->jfr->jfr_cfg.trans_mode != UBCORE_TP_RM))
		return -1;

@@ -954,7 +937,6 @@ struct ubcore_tjetty *ubcore_import_jetty(struct ubcore_device *dev,
	tjetty->cfg = *cfg;
	tjetty->ub_dev = dev;
	tjetty->uctx = ubcore_get_uctx(udata);
	tjetty->type = cfg->type;

	atomic_set(&tjetty->use_cnt, 0);
	mutex_init(&tjetty->lock);
+74 −54
Original line number Diff line number Diff line
@@ -280,7 +280,7 @@ static int ubcore_cmd_set_eid_mode(struct ubcore_cmd_hdr *hdr)
	/* change eid mode, need to flush eids */
	event.ub_dev = dev;
	event.event_type = UBCORE_EVENT_EID_CHANGE;
	for (i = 0; i < dev->attr.max_eid_cnt; i++) {
	for (i = 0; i < dev->attr.dev_cap.max_eid_cnt; i++) {
		if (dev->eid_table.eid_entries[i].valid == true) {
			eid = dev->eid_table.eid_entries[i].eid;
			if (dev->cfg.pattern == (uint8_t)UBCORE_PATTERN_1)
@@ -401,7 +401,7 @@ static int ubcore_copy_to_usr_tp_list(uint64_t user_tp_list, struct ubcore_res_t
	ret = ubcore_copy_to_user((void __user *)(uintptr_t)user_tp_list,
		tpg->tp_list, sizeof(uint32_t) * tpg->tp_cnt);
	if (ret != 0)
		ubcore_log_err("ubcore_copy_to_user failed.\n");
		ubcore_log_err("ubcore_copy_to_user failed, cnt = %u\n", tpg->tp_cnt);

	vfree(tpg->tp_list);
	return ret;
@@ -416,14 +416,28 @@ static int ubcore_copy_to_usr_jetty_list(uint64_t user_jetty_list,
	ret = ubcore_copy_to_user((void __user *)(uintptr_t)user_jetty_list, jetty_grp->jetty_list,
		sizeof(uint32_t) * jetty_grp->jetty_cnt);
	if (ret != 0)
		ubcore_log_err("ubcore_copy_to_user failed.\n");
		ubcore_log_err("ubcore_copy_to_user failed, cnt = %u\n", jetty_grp->jetty_cnt);

	vfree(jetty_grp->jetty_list);
	return ret;
}

/* Copy the kernel-built segment array out to user space, then release it. */
static int ubcore_copy_to_usr_segment_list(uint64_t user_seg_list,
	struct ubcore_res_seg_val *seg_info)
{
	void __user *dst = (void __user *)(uintptr_t)user_seg_list;
	int ret;

	ret = ubcore_copy_to_user(dst, seg_info->seg_list,
		sizeof(struct ubcore_seg_info) * seg_info->seg_cnt);
	if (ret != 0)
		ubcore_log_err("ubcore_copy_to_user failed, cnt = %u\n", seg_info->seg_cnt);

	/* The list was vmalloc'ed by the driver query path; free it on every path. */
	vfree(seg_info->seg_list);
	return ret;
}

static void ubcore_query_copy_cnt(struct ubcore_cmd_query_res *arg,
	uint64_t val_addr, uint64_t user_addr)
	uint64_t k_addr, uint64_t user_addr)
{
	struct ubcore_res_tpg_val *tpg_val;
	struct ubcore_res_tpg_val *tpg_user_val;
@@ -431,22 +445,30 @@ static void ubcore_query_copy_cnt(struct ubcore_cmd_query_res *arg,
	struct ubcore_res_jetty_group_val *jgrp_val;
	struct ubcore_res_jetty_group_val *jgrp_user_val;

	struct ubcore_res_seg_val *seg_val;
	struct ubcore_res_seg_val *seg_user_val;

	struct ubcore_res_dev_val *dev_val;
	struct ubcore_res_dev_val *dev_user_val;

	switch (arg->in.type) {
	case UBCORE_RES_KEY_TPG:
		tpg_val = (struct ubcore_res_tpg_val *)val_addr;
		tpg_val = (struct ubcore_res_tpg_val *)k_addr;
		tpg_user_val = (struct ubcore_res_tpg_val *)user_addr;
		tpg_user_val->tp_cnt = tpg_val->tp_cnt;
		return;
	case UBCORE_RES_KEY_JETTY_GROUP:
		jgrp_val = (struct ubcore_res_jetty_group_val *)val_addr;
		jgrp_val = (struct ubcore_res_jetty_group_val *)k_addr;
		jgrp_user_val = (struct ubcore_res_jetty_group_val *)user_addr;
		jgrp_user_val->jetty_cnt = jgrp_val->jetty_cnt;
		return;
	case UBCORE_RES_KEY_SEG:
		seg_val = (struct ubcore_res_seg_val *)k_addr;
		seg_user_val = (struct ubcore_res_seg_val *)user_addr;
		seg_user_val->seg_cnt = seg_val->seg_cnt;
		return;
	case UBCORE_RES_KEY_URMA_DEV:
		dev_val = (struct ubcore_res_dev_val *)val_addr;
		dev_val = (struct ubcore_res_dev_val *)k_addr;
		dev_user_val = (struct ubcore_res_dev_val *)user_addr;
		dev_user_val->seg_cnt = dev_val->seg_cnt;
		dev_user_val->jfs_cnt = dev_val->jfs_cnt;
@@ -471,7 +493,7 @@ static int ubcore_query_cnt(struct ubcore_device *dev, struct ubcore_cmd_query_r
{
	struct ubcore_res_key key = {0};
	struct ubcore_res_val val = {0};
	void *kernal_addr;
	void *kernal_addr; /* urma applies for memory; driver fills; kfree during 2nd ioctl */
	void *user_addr;
	int ret;

@@ -563,20 +585,30 @@ static int ubcore_query_list(struct ubcore_device *dev, struct ubcore_cmd_query_
	if (ret != 0)
		goto kfree_addr;

	if (arg->in.type == UBCORE_RES_KEY_TPG) {
	switch (arg->in.type) {
	case UBCORE_RES_KEY_TPG:
		ret = ubcore_copy_to_usr_tp_list(
			(uint64_t)(((struct ubcore_res_tpg_val *)user_addr)->tp_list),
			(struct ubcore_res_tpg_val *)kernal_addr);
	} else if (arg->in.type == UBCORE_RES_KEY_JETTY_GROUP) {
		break;
	case UBCORE_RES_KEY_JETTY_GROUP:
		ret = ubcore_copy_to_usr_jetty_list(
			(uint64_t)(((struct ubcore_res_jetty_group_val *)user_addr)->jetty_list),
			(struct ubcore_res_jetty_group_val *)kernal_addr);
	} else if (arg->in.type == UBCORE_RES_KEY_URMA_DEV) {
		break;
	case UBCORE_RES_KEY_SEG:
		ret = ubcore_copy_to_usr_segment_list(
			(uint64_t)(((struct ubcore_res_seg_val *)user_addr)->seg_list),
			(struct ubcore_res_seg_val *)kernal_addr);
		break;
	case UBCORE_RES_KEY_URMA_DEV:
		ret = ubcore_fill_user_res_dev((struct ubcore_res_dev_val *)user_addr,
			(struct ubcore_res_dev_val *)kernal_addr);
		ubcore_query_list_free((struct ubcore_res_dev_val *)kernal_addr);
	} else {
		break;
	default:
		(void)memcpy(user_addr, k_addr, res_len);
		break;
	}

	if (ret != 0)
@@ -756,20 +788,19 @@ static void ubcore_ipv4_to_netaddr(struct ubcore_net_addr *netaddr, __be32 ipv4)
	netaddr->net_addr.in4.addr = ipv4;
}

static void ubcore_sip_init(struct ubcore_sip_info *sip, struct ubcore_device *pf_dev,
static void ubcore_sip_init(struct ubcore_sip_info *sip, struct ubcore_device *tpf_dev,
	const struct ubcore_net_addr *netaddr, uint8_t *port_list,
	uint8_t port_cnt, uint32_t prefix_len, uint32_t mtu)
	uint8_t port_cnt, uint32_t prefix_len, struct net_device *netdev)
{
	(void)memcpy(sip->dev_name, pf_dev->dev_name, UBCORE_MAX_DEV_NAME);
	(void)memcpy(sip->dev_name, tpf_dev->dev_name, UBCORE_MAX_DEV_NAME);
	(void)memcpy(&sip->addr, netaddr, sizeof(struct ubcore_net_addr));
	if (port_list != NULL)
		(void)memcpy(sip->port_id, port_list, UBCORE_MAX_PORT_CNT);
	else
		ubcore_log_warn("no one set port_list\n");

	sip->port_cnt = port_cnt;
	sip->prefix_len = prefix_len;
	sip->mtu = mtu;
	sip->mtu = netdev->mtu;
	(void)memcpy(sip->netdev_name, netdev_name(netdev),
		UBCORE_MAX_DEV_NAME);
}

static void ubcore_add_net_addr(struct ubcore_device *tpf_dev, struct ubcore_device *pf_dev,
@@ -783,23 +814,22 @@ static void ubcore_add_net_addr(struct ubcore_device *tpf_dev, struct ubcore_dev

	/* get driver set nedev port */
	ubcore_find_port_netdev(pf_dev, netdev, &port_list, &port_cnt);
	ubcore_sip_init(&sip, tpf_dev,
		netaddr, port_list, port_cnt, prefix_len, netdev);

	ubcore_sip_init(&sip, pf_dev,
		netaddr, port_list, port_cnt, prefix_len, (uint32_t)netdev->mtu);

	ret = ubcore_lookup_sip_idx(&sip, &index);
	ret = ubcore_lookup_sip_idx(&tpf_dev->sip_table, &sip, &index);
	if (ret == 0) {
		ubcore_log_err("sip already exists\n");
		return;
	}
	index = ubcore_sip_idx_alloc(0);
	index = (uint32_t)ubcore_sip_idx_alloc(&tpf_dev->sip_table);

	if (tpf_dev->ops->add_net_addr != NULL &&
		tpf_dev->ops->add_net_addr(tpf_dev, netaddr, index) != 0)
		ubcore_log_err("Failed to set net addr");

	/* add net_addr entry, record idx -> netaddr mapping */
	(void)ubcore_add_sip_entry(&sip, index);
	(void)ubcore_add_sip_entry(&tpf_dev->sip_table, &sip, index);

	/* notify uvs add sip info */
	if (ubcore_get_netlink_valid() == true)
@@ -821,17 +851,17 @@ static void ubcore_delete_net_addr(struct ubcore_device *tpf_dev, struct ubcore_

	ubcore_find_port_netdev(pf_dev, netdev, &port_list, &port_cnt);

	ubcore_sip_init(&sip, pf_dev,
		netaddr, port_list, port_cnt, prefix_len, (uint32_t)netdev->mtu);
	if (ubcore_lookup_sip_idx(&sip, &index) != 0)
	ubcore_sip_init(&sip, tpf_dev,
		netaddr, port_list, port_cnt, prefix_len, netdev);
	if (ubcore_lookup_sip_idx(&tpf_dev->sip_table, &sip, &index) != 0)
		return;

	if (tpf_dev->ops->delete_net_addr != NULL &&
		tpf_dev->ops->delete_net_addr(tpf_dev, index) != 0)
		ubcore_log_err("Failed to delete net addr");

	(void)ubcore_del_sip_entry(index);
	(void)ubcore_sip_idx_free(index);
	(void)ubcore_del_sip_entry(&tpf_dev->sip_table, index);
	(void)ubcore_sip_idx_free(&tpf_dev->sip_table, index);
	/* notify uvs delete sip info */
	if (ubcore_get_netlink_valid() == true)
		(void)ubcore_notify_uvs_del_sip(tpf_dev, &sip, index);
@@ -1107,38 +1137,33 @@ static int ubcore_remove_netaddr(struct ubcore_device *dev, struct net_device *n

static void ubcore_change_mtu(struct ubcore_device *dev, struct net_device *netdev)
{
	struct ubcore_sip_info new_sip = {0};
	struct ubcore_sip_info old_sip = {0};
	struct ubcore_device *tpf_dev;
	struct ubcore_sip_info *new_sip;
	struct ubcore_sip_info old_sip;
	uint32_t max_cnt;
	uint32_t i;

	tpf_dev = ubcore_find_tpf_device(NULL, UBCORE_TRANSPORT_UB);
	tpf_dev = ubcore_find_tpf_by_dev(dev, UBCORE_TRANSPORT_UB);
	if (tpf_dev == NULL)
		return;

	max_cnt = ubcore_get_sip_max_cnt();

	mutex_lock(&tpf_dev->sip_table.lock);
	max_cnt = ubcore_get_sip_max_cnt(&tpf_dev->sip_table);
	for (i = 0; i < max_cnt; i++) {
		if (ubcore_get_sip_info_copy(i, &new_sip) != 0)
			continue;

		if (memcmp(new_sip.dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME) != 0)
			continue;

		old_sip = new_sip;
		new_sip.mtu = netdev->mtu;
		if (ubcore_sip_info_update(i, (const struct ubcore_sip_info *)&new_sip) != 0)
		new_sip = ubcore_lookup_sip_info(&tpf_dev->sip_table, i);
		if (new_sip == NULL || memcmp(new_sip->netdev_name, netdev_name(netdev),
			UBCORE_MAX_DEV_NAME) != 0)
			continue;

		old_sip = *new_sip;
		new_sip->mtu = netdev->mtu;
		if (ubcore_get_netlink_valid() == true) {
			(void)ubcore_notify_uvs_del_sip(tpf_dev, &old_sip, i);
			(void)ubcore_notify_uvs_add_sip(tpf_dev, &new_sip, i);
			(void)ubcore_notify_uvs_add_sip(tpf_dev, new_sip, i);
		}
		ubcore_log_info("dev_name: %s, mtu: %u change to mtu: %u\n",
			dev->dev_name, old_sip.mtu, new_sip.mtu);
		ubcore_log_info("dev_name: %s, netdev: %s mtu: %u change to mtu: %u\n",
			dev->dev_name, netdev_name(netdev), old_sip.mtu, new_sip->mtu);
	}

	mutex_unlock(&tpf_dev->sip_table.lock);
	ubcore_put_device(tpf_dev);
}

@@ -1375,10 +1400,7 @@ static int __init ubcore_init(void)
	if (ret != 0)
		return ret;

	ubcore_sip_table_init();

	if (ubcore_netlink_init() != 0) {
		ubcore_sip_table_uninit();
		ubcore_unregister_sysfs();
		return -1;
	}
@@ -1387,7 +1409,6 @@ static int __init ubcore_init(void)
	if (ret != 0) {
		pr_err("Failed to register notifiers\n");
		ubcore_netlink_exit();
		ubcore_sip_table_uninit();
		ubcore_unregister_sysfs();
		return -1;
	}
@@ -1399,7 +1420,6 @@ static int __init ubcore_init(void)
static void __exit ubcore_exit(void)
{
	ubcore_unregister_notifiers();
	ubcore_sip_table_uninit();
	ubcore_netlink_exit();
	ubcore_unregister_sysfs();
	ubcore_log_info("ubcore module exits.\n");
Loading