Commit b441cee3 authored by Jie Lei, committed by JangShui Yang
Browse files

hns3 udma: adjustment of the maximum number of Jettys

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8V1IQ


CVE: NA

--------------------------------------------------------

This patch introduces the driver loading parameter rm_support.
Its default value is 1, indicating that RM mode is supported;
in this case the number of Jettys is limited. When rm_support
is set to 0, RM mode is not supported; in this case the total
number of Jettys in RC and UM modes equals the number of QPs,
and the maximum number can reach 1M.

Fixes: efe11e97 ("hns3 udma: init software tables of qp/uar and others")
Signed-off-by: Chunzhi Hu <huchunzhi@huawei.com>
Signed-off-by: Jie Lei <leijie31@huawei.com>
parent 0d79451b
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -72,6 +72,7 @@ enum udma_jfr_cap_flags {

struct udma_create_jfr_resp {
	uint32_t jfr_caps;
	uint32_t srqn;
};

struct udma_jfc_attr_ex {
@@ -183,6 +184,10 @@ struct udma_create_ctx_resp {
	uint32_t dca_qps;
	uint32_t dca_mmap_size;
	uint32_t dca_mode;
	uint8_t chip_id;
	uint8_t die_id;
	uint8_t func_id;
	bool rm_support;
};

struct flush_cqe_param {
+1 −0
Original line number Diff line number Diff line
@@ -861,6 +861,7 @@ struct udma_dev {
	struct udma_port		port_data[UDMA_MAX_PORTS];
	struct udma_dev_debugfs		*dbgfs;
	uint64_t			notify_addr;
	bool				rm_support;
};

struct udma_seg {
+23 −34
Original line number Diff line number Diff line
@@ -176,10 +176,16 @@ static void set_default_jetty_caps(struct udma_dev *dev)
{
	struct udma_caps *caps = &dev->caps;

	caps->num_jfc_shift = UDMA_DEFAULT_MAX_JETTY_X_SHIFT;
	caps->num_jfc_shift = ilog2(caps->num_cqs);
	if (dev->rm_support) {
		caps->num_jfs_shift = UDMA_DEFAULT_MAX_JETTY_X_SHIFT;
		caps->num_jfr_shift = UDMA_DEFAULT_MAX_JETTY_X_SHIFT;
		caps->num_jetty_shift = UDMA_DEFAULT_MAX_JETTY_X_SHIFT;
	} else {
		caps->num_jfs_shift = caps->num_qps_shift;
		caps->num_jfr_shift = caps->num_qps_shift;
		caps->num_jetty_shift = caps->num_qps_shift;
	}
}

static void query_hw_speed(struct udma_dev *udma_dev)
@@ -419,16 +425,6 @@ static int load_func_res_caps(struct udma_dev *udma_dev)
	return 0;
}

static void setup_default_ext_caps(struct udma_dev *udma_dev)
{
	struct udma_caps *caps = &udma_dev->caps;

	caps->num_pi_qps = caps->num_qps;
	caps->llm_ba_idx = 0;
	caps->llm_ba_num = UDMA_EXT_LLM_MAX_DEPTH;

}

static int load_ext_cfg_caps(struct udma_dev *udma_dev)
{
	struct udma_cmq_desc desc;
@@ -469,8 +465,6 @@ static int query_func_resource_caps(struct udma_dev *udma_dev)
		return ret;
	}

	setup_default_ext_caps(udma_dev);

	ret = load_ext_cfg_caps(udma_dev);
	if (ret)
		dev_err(dev, "failed to load ext cfg, ret = %d (pf).\n",
@@ -966,18 +960,6 @@ static int udma_alloc_cmq_desc(struct udma_dev *udma_dev,
	return 0;
}

static void udma_free_cmq_desc(struct udma_dev *udma_dev,
			       struct udma_cmq_ring *ring)
{
	dma_unmap_single(udma_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct udma_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

static int init_csq(struct udma_dev *udma_dev,
		    struct udma_cmq_ring *csq)
{
@@ -1023,15 +1005,22 @@ static int udma_cmq_init(struct udma_dev *udma_dev)
static void udma_cmq_exit(struct udma_dev *udma_dev)
{
	struct udma_priv *priv = (struct udma_priv *)udma_dev->priv;
	struct udma_cmq_ring *ring = (struct udma_cmq_ring *)&priv->cmq.csq;

	dma_unmap_single(udma_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct udma_cmq_desc),
			 DMA_BIDIRECTIONAL);

	udma_free_cmq_desc(udma_dev, &priv->cmq.csq);
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

static void func_clr_hw_resetting_state(struct udma_dev *udma_dev,
					struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	uint64_t end;
	int end;

	udma_dev->dis_db = true;

@@ -1134,7 +1123,7 @@ static void __udma_function_clear(struct udma_dev *udma_dev, int vf_id)
	bool fclr_write_fail_flag = false;
	struct udma_func_clear *resp;
	struct udma_cmq_desc desc;
	uint64_t end;
	int end;
	int ret = 0;

	if (check_device_is_in_reset(udma_dev))
@@ -1585,7 +1574,7 @@ static int config_hem_ba_to_hw(struct udma_dev *udma_dev, uint64_t obj,
	struct udma_mbox *mb;
	int ret;

	if (IS_ERR_OR_NULL(mbox))
	if (IS_ERR(mbox))
		return -ENOMEM;

	mb = (struct udma_mbox *)desc.data;
@@ -1785,8 +1774,8 @@ static int __udma_init_instance(struct hnae3_handle *handle)
	if (dfx_switch) {
		ret = udma_dfx_init(udma_dev);
		if (ret) {
			dev_err(udma_dev->dev, "UDMA dfx init failed(%d)!\n",
				ret);
			dev_err(udma_dev->dev,
				"UDMA dfx init failed(%d)!\n", ret);
			goto error_failed_dfx_init;
		}
	}
+53 −8
Original line number Diff line number Diff line
@@ -28,6 +28,7 @@ static void init_jetty_cfg(struct udma_jetty *jetty,
	jetty->shared_jfr = cfg->flag.bs.share_jfr;
	jetty->tp_mode = cfg->trans_mode;
	jetty->ubcore_jetty.jetty_cfg = *cfg;
	jetty->send_jfc = to_udma_jfc(cfg->send_jfc);
}

static void udma_fill_jetty_um_qp_attr(struct udma_dev *dev,
@@ -316,6 +317,33 @@ static int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty *jetty)
	return ret;
}

static int alloc_common_jetty_id(struct udma_dev *udma_dev, struct udma_jetty *jetty)
{
	struct udma_jetty_table *jetty_table = &udma_dev->jetty_table;
	int ret;

	ret = alloc_common_qpn(udma_dev, jetty->send_jfc, &jetty->jetty_id);
	if (ret)
		return ret;

	ret = xa_err(xa_store(&jetty_table->xa, jetty->jetty_id, jetty, GFP_KERNEL));
	if (ret) {
		dev_err(udma_dev->dev, "failed to store Jetty, ret = %d.\n",
			ret);
		free_common_qpn(udma_dev, jetty->jetty_id);
		return ret;
	}
	jetty->ubcore_jetty.id = jetty->jetty_id;

	return ret;
}

static void free_common_jetty_id(struct udma_dev *udma_dev, struct udma_jetty *jetty)
{
	xa_erase(&udma_dev->jetty_table.xa, jetty->jetty_id);
	free_common_qpn(udma_dev, jetty->jetty_id);
}

static void store_jetty_id(struct udma_dev *udma_dev, struct udma_jetty *jetty)
{
	struct jetty_list *jetty_new;
@@ -419,15 +447,24 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *dev,
	struct udma_jetty *jetty;
	int ret;

	if (!udma_dev->rm_support && cfg->trans_mode == UBCORE_TP_RM) {
		dev_err(udma_dev->dev, "RM mode jetty is not supported.\n");
		return NULL;
	}

	jetty = kzalloc(sizeof(struct udma_jetty), GFP_KERNEL);
	if (!jetty)
		return NULL;

	init_jetty_cfg(jetty, cfg);
	if (cfg->trans_mode == UBCORE_TP_RM)
		ret = alloc_jetty_id(udma_dev, jetty);
	else
		ret = alloc_common_jetty_id(udma_dev, jetty);
	if (ret)
		goto err_alloc_jetty_id;

	if (cfg->trans_mode == UBCORE_TP_RM)
		init_jetty_x_qpn_bitmap(udma_dev, &jetty->qpn_map,
					udma_dev->caps.num_jetty_shift,
					UDMA_JETTY_QPN_PREFIX, jetty->jetty_id);
@@ -446,8 +483,12 @@ struct ubcore_jetty *udma_create_jetty(struct ubcore_device *dev,
	return &jetty->ubcore_jetty;

err_alloc_jetty_buf:
	if (cfg->trans_mode == UBCORE_TP_RM)
		clean_jetty_x_qpn_bitmap(&jetty->qpn_map);
	if (cfg->trans_mode == UBCORE_TP_RM)
		free_jetty_id(udma_dev, jetty);
	else
		free_common_jetty_id(udma_dev, jetty);
err_alloc_jetty_id:
	kfree(jetty);

@@ -485,12 +526,16 @@ int udma_destroy_jetty(struct ubcore_jetty *jetty)
	udma_dev = to_udma_dev(jetty->ub_dev);

	ret = free_jetty_buf(udma_dev, udma_jetty);
	if (udma_jetty->tp_mode == UBCORE_TP_RM)
		clean_jetty_x_qpn_bitmap(&udma_jetty->qpn_map);

	if (dfx_switch)
		delete_jetty_id(udma_dev, udma_jetty);

	if (udma_jetty->tp_mode == UBCORE_TP_RM)
		free_jetty_id(udma_dev, udma_jetty);
	else
		free_common_jetty_id(udma_dev, udma_jetty);
	kfree(udma_jetty);

	return ret;
+1 −0
Original line number Diff line number Diff line
@@ -47,6 +47,7 @@ struct udma_jetty {
	uint32_t		jetty_id;
	struct mutex		tp_mutex;
	bool			dca_en;
	struct udma_jfc		*send_jfc;
};

static inline struct udma_jetty *to_udma_jetty(struct ubcore_jetty *ubcore_jetty)
Loading