Commit 4888db6e authored by Yu Zhang, committed by JiangShui

hns3 udma: support create and destroy JETTY

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I85R2F


CVE: NA

---------------------------------------------------------------------

This patch supports the creation and deletion of JETTY.
Compared with the JFS/JFR programming mode, JETTY provides both the
sending and receiving functions. In the send direction, a JETTY
contains a JFS that supports sending messages to multiple destination
nodes. In the receive direction, a JETTY contains a JFR that can
receive data from different nodes. The JFS and JFR in a JETTY can
point to different JFCs. When creating a JETTY, the depth and
attributes of the JFS and JFR contained in the JETTY need to be
specified. The UDMA driver will allocate memory resources and
configure the hardware (QP Context and SRQ Context).
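
For illustration only, a minimal caller-side sketch of the new verb,
using the cfg fields exactly as this patch consumes them (send_jfc,
recv_jfc, shared_jfr and udata are hypothetical caller variables; the
surrounding urma/ubcore plumbing is not part of this commit):

	struct ubcore_jetty_cfg cfg = {};
	struct ubcore_jetty *jetty;

	cfg.trans_mode = UBCORE_TP_RM;	/* UM, RM and RC are all handled */
	cfg.jfs_depth = 128;		/* send queue depth */
	cfg.max_send_sge = 4;
	cfg.send_jfc = send_jfc;	/* completions for the JFS half */
	cfg.recv_jfc = recv_jfc;	/* completions for the JFR half */
	cfg.jfr = shared_jfr;		/* non-NULL: share an existing JFR */

	jetty = udma_create_jetty(&udma_dev->ub_dev, &cfg, udata);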

Signed-off-by: Yu Zhang <zhangyu709@huawei.com>
parent 85edae91
+2 −1
@@ -10,6 +10,7 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/ \
 $(MODULE_NAME)-objs := hns3_udma_hw.o hns3_udma_main.o hns3_udma_cmd.o \
 			hns3_udma_hem.o hns3_udma_qp.o hns3_udma_eq.o \
 			hns3_udma_db.o hns3_udma_jfc.o hns3_udma_jfr.o \
-			hns3_udma_segment.o  hns3_udma_tp.o hns3_udma_jfs.o
+			hns3_udma_segment.o  hns3_udma_tp.o hns3_udma_jfs.o \
+			hns3_udma_jetty.o
 
 obj-$(CONFIG_UB_UDMA_HNS3) := hns3_udma.o
+12 −0
@@ -25,6 +25,7 @@
 #define UDMA_JETTY_X_PREFIX_BIT_NUM	2
 #define UDMA_JFS_QPN_PREFIX		0x0
 #define UDMA_JFR_QPN_PREFIX		0x1
+#define UDMA_JETTY_QPN_PREFIX		0x2
 #define UDMA_ADDR_4K_MASK		0xfffUL
 #define URMA_SEG_ACCESS_GUARD		(1UL << 5)
 
@@ -99,6 +100,13 @@ struct udma_create_tp_ucmd {
 	uint64_t		sdb_addr;
 };
 
+struct udma_create_jetty_ucmd {
+	struct udma_create_tp_ucmd	create_tp_ucmd;
+	uint32_t			jfr_id;
+	uint64_t			buf_addr;
+	uint64_t			sdb_addr;
+};
+
 enum udma_qp_cap_flags {
 	UDMA_QP_CAP_RQ_RECORD_DB = 1 << 0,
 	UDMA_QP_CAP_SQ_RECORD_DB = 1 << 1,
@@ -113,6 +121,10 @@ struct udma_create_tp_resp {
 	uint8_t			priority;
 };
 
+struct udma_create_jetty_resp {
+	struct udma_create_tp_resp create_tp_resp;
+};
+
 struct udma_create_jfs_ucmd {
 	struct udma_create_tp_ucmd create_tp_ucmd;
 };
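
As a usage sketch (hypothetical user-space values; the provider
plumbing that carries udrv_data is assumed), user space fills the new
ucmd like this, and alloc_jetty_buf() in the new file below consumes
it:

	struct udma_create_jetty_ucmd ucmd = {};

	ucmd.jfr_id = jfr_id;			/* JFR to bind when cfg->jfr is NULL */
	ucmd.buf_addr = (uintptr_t)sq_buf;	/* RC mode: user SQ WQE/SGE buffer */
	ucmd.sdb_addr = (uintptr_t)sdb_page;	/* software doorbell page to map */
	/* reaches the kernel through udata->udrv_data->in_addr */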
+7 −2
@@ -45,6 +45,8 @@
 #define UDMA_GMV_ENTRY_SZ			32
 
 #define UDMA_CQ_BANK_NUM			4
+
+#define UDMA_SGE_IN_WQE				2
 #define UDMA_SGE_SHIFT				4
 #define UDMA_SGE_SIZE				16
 #define UDMA_IDX_QUE_ENTRY_SZ			4
@@ -53,6 +55,9 @@
 #define UDMA_PAGE_SIZE				(1 << UDMA_HW_PAGE_SHIFT)
 #define udma_hw_page_align(x)		ALIGN(x, 1 << UDMA_HW_PAGE_SHIFT)
 
+#define UDMA_DWQE_SIZE				65536
+#define UDMA_DWQE_MMAP_QP_NUM			1024
+
 #define UDMA_HOP_NUM_0				0xff
 #define UDMA_CAP_FLAGS_EX_SHIFT			12
 
@@ -137,6 +142,8 @@
 #define UDMA_DEV_START_OFFSET 2
 #define UDMA_DEV_EX_START_OFFSET 4
 
+#define UDMA_MIN_JFS_DEPTH 64
+
 enum {
 	NO_ARMED = 0x0
 };
@@ -287,8 +294,6 @@ struct udma_buf_attr {
 	} region[UDMA_MAX_BT_REGION];
 	uint32_t		region_count; /* valid region count */
 	uint32_t		page_shift;  /* buffer page shift */
-	/* only alloc buffer-required MTT memory */
-	bool			mtt_only;
 };
 
 struct udma_buf_list {
+0 −9
@@ -1920,15 +1920,6 @@ int udma_mtr_create(struct udma_dev *udma_dev, struct udma_mtr *mtr,
 		return ret;
 	}
 
-	/* The caller has its own buffer list and invokes the udma_mtr_map()
-	 * to finish the MTT configuration.
-	 */
-	if (buf_attr->mtt_only) {
-		mtr->umem = NULL;
-		mtr->kmem = NULL;
-		return 0;
-	}
-
 	ret = mtr_alloc_bufs(udma_dev, mtr, buf_attr, user_addr, is_user);
 	if (ret) {
 		dev_err(dev, "failed to alloc mtr bufs, ret = %d.\n", ret);
+378 −0
// SPDX-License-Identifier: GPL-2.0
/* Huawei UDMA Linux driver
 * Copyright (c) 2023-2023 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/slab.h>
#include "urma/ubcore_types.h"
#include "hns3_udma_tp.h"
#include "hns3_udma_jfc.h"
#include "hns3_udma_jfr.h"
#include "hns3_udma_db.h"
#include "hns3_udma_jetty.h"

static void init_jetty_cfg(struct udma_jetty *jetty,
			   const struct ubcore_jetty_cfg *cfg)
{
	jetty->shared_jfr = cfg->flag.bs.share_jfr;
	jetty->tp_mode = cfg->trans_mode;
	jetty->ubcore_jetty.jetty_cfg = *cfg;
}

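/* A UM jetty is backed by a single UD QP: both the JFS (send) and JFR
 * (receive) halves are described by one udma_qp_attr, and the requested
 * priority is clamped to the device's supported SL range.
 */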
static void udma_fill_jetty_um_qp_attr(struct udma_dev *dev,
				       struct udma_jetty *jetty,
				       struct ubcore_ucontext *uctx,
				       const struct ubcore_jetty_cfg *cfg)
{
	struct udma_ucontext *udma_ctx = to_udma_ucontext(uctx);
	struct udma_qp_attr *qp_attr = &jetty->qp.qp_attr;

	qp_attr->is_jetty = true;
	qp_attr->uctx = uctx;
	qp_attr->pdn = udma_ctx->pdn;
	qp_attr->send_jfc = to_udma_jfc(cfg->send_jfc);
	qp_attr->jetty = jetty;
	qp_attr->cap.max_send_wr = cfg->jfs_depth;
	qp_attr->cap.max_send_sge = cfg->max_send_sge;
	qp_attr->cap.max_inline_data = cfg->max_inline_data;
	qp_attr->cap.retry_cnt = cfg->retry_cnt;
	qp_attr->cap.rnr_retry = cfg->rnr_retry;
	qp_attr->cap.ack_timeout = cfg->err_timeout;
	qp_attr->qp_type = QPT_UD;

	qp_attr->jfr = jetty->udma_jfr;
	qp_attr->qpn_map = &jetty->qpn_map;
	qp_attr->recv_jfc = to_udma_jfc(cfg->recv_jfc);
	if (jetty->ubcore_jetty.jetty_cfg.priority >= dev->caps.sl_num) {
		qp_attr->priority =
			dev->caps.sl_num > 0 ? dev->caps.sl_num - 1 : 0;
		dev_err(dev->dev, "the configured priority (%d) should be smaller than the SL num (%d), priority (%d) is used.\n",
			jetty->ubcore_jetty.jetty_cfg.priority,
			dev->caps.sl_num, qp_attr->priority);
	} else {
		qp_attr->priority = jetty->ubcore_jetty.jetty_cfg.priority;
	}
}

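/* Move the jetty's QP between states through the common TP modify path:
 * RESET -> RTS at create time, back to RESET at destroy time.
 */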
static int udma_modify_qp_jetty(struct udma_dev *dev, struct udma_jetty *jetty,
				enum udma_qp_state target_state)
{
	struct udma_modify_tp_attr m_attr = {};
	struct udma_qp *qp;
	int ret;

	qp = &jetty->qp;
	qp->udma_device = dev;
	qp->send_jfc = qp->qp_attr.send_jfc;
	qp->recv_jfc = qp->qp_attr.recv_jfc;

	m_attr.path_mtu = UBCORE_MTU_4096;
	m_attr.hop_limit = MAX_HOP_LIMIT;

	ret = udma_modify_qp_common(qp, &m_attr, jetty->qp.state, target_state);
	if (ret)
		dev_err(dev->dev, "failed to modify jetty qp to state %d.\n",
			target_state);

	qp->state = target_state;
	return ret;
}

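/* Bind the jetty's receive side: either the shared JFR passed in cfg,
 * or a standalone JFR looked up by the user-space-provided jfr_id.
 */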
static int set_jetty_jfr(struct udma_dev *dev, struct udma_jetty *jetty,
			 const struct ubcore_jetty_cfg *cfg, uint32_t jfr_id)
{
	if (cfg->jfr) {
		jetty->shared_jfr = true;
		jetty->udma_jfr = to_udma_jfr(cfg->jfr);
	} else {
		jetty->shared_jfr = false;
		jetty->udma_jfr = get_udma_jfr(&dev->ub_dev, jfr_id);
		if (!jetty->udma_jfr) {
			dev_err(dev->dev,
				"failed to find jfr, jfr_id:%u.\n", jfr_id);
			return -EINVAL;
		}
	}

	return 0;
}

static int alloc_jetty_um_qp(struct udma_dev *dev, struct udma_jetty *jetty,
			     const struct ubcore_jetty_cfg *cfg,
			     struct ubcore_udata *udata)
{
	int ret;

	udma_fill_jetty_um_qp_attr(dev, jetty, udata->uctx, cfg);

	ret = udma_create_qp_common(dev, &jetty->qp, udata);
	if (ret) {
		dev_err(dev->dev, "failed to create qp for um jetty.\n");
		return ret;
	}

	jetty->qp.state = QPS_RESET;
	ret = udma_modify_qp_jetty(dev, jetty, QPS_RTS);
	if (ret)
		udma_destroy_qp_common(dev, &jetty->qp);

	return ret;
}

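/* Size the extended SGE area of the SQ: SGEs beyond the UDMA_SGE_IN_WQE
 * slots carried in the WQE itself (or needed for inline data) spill into
 * a separate region whose entry count is rounded up to a power of two.
 */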
static void set_jetty_ext_sge_param(struct udma_jetty *jetty)
{
	struct ubcore_jetty_cfg *jetty_cfg = &jetty->ubcore_jetty.jetty_cfg;
	uint32_t max_inline_data;
	uint32_t wqe_sge_cnt = 0;
	uint32_t total_sge_cnt;
	uint32_t ext_sge_cnt;
	uint32_t sq_wqe_cnt;
	uint32_t max_gs;

	sq_wqe_cnt = jetty->rc_node.wqe_cnt;
	jetty->rc_node.sge_shift = UDMA_SGE_SHIFT;
	max_inline_data = roundup_pow_of_two(jetty_cfg->max_inline_data);
	ext_sge_cnt = max_inline_data / UDMA_SGE_SIZE;

	max_gs = max_t(uint32_t, ext_sge_cnt, jetty_cfg->max_send_sge);
	if (max_gs > UDMA_SGE_IN_WQE)
		wqe_sge_cnt = max_gs - UDMA_SGE_IN_WQE;

	if (wqe_sge_cnt) {
		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt);
		jetty->rc_node.sge_cnt = max(total_sge_cnt,
					     (uint32_t)UDMA_PAGE_SIZE /
					     UDMA_SGE_SIZE);
	}
}

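/* Describe the RC jetty's SQ buffer for the MTR: one region for the WQEs
 * (depth clamped to at least UDMA_MIN_JFS_DEPTH) and one for the extended
 * SGE space computed above.
 */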
static int set_jetty_buf_attr(struct udma_dev *udma_dev,
			      struct udma_jetty *jetty,
			      struct udma_buf_attr *buf_attr)
{
	int total_buf_size = 0;
	uint32_t cfg_depth;
	int buf_size;
	int idx = 0;

	/* SQ WQE */
	jetty->rc_node.sge_offset = 0;
	cfg_depth = roundup_pow_of_two(jetty->ubcore_jetty.jetty_cfg.jfs_depth);
	jetty->rc_node.wqe_cnt = cfg_depth < UDMA_MIN_JFS_DEPTH ?
				 UDMA_MIN_JFS_DEPTH : cfg_depth;
	jetty->rc_node.wqe_shift = UDMA_SQ_WQE_SHIFT;
	set_jetty_ext_sge_param(jetty);

	buf_size = to_udma_hem_entries_size(jetty->rc_node.wqe_cnt,
					    jetty->rc_node.wqe_shift);
	if (buf_size > 0) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = udma_dev->caps.wqe_sq_hop_num;
		idx++;
		total_buf_size += buf_size;
	}
	/* extended SGE space in SQ */
	jetty->rc_node.sge_offset = total_buf_size;

	buf_size = to_udma_hem_entries_size(jetty->rc_node.sge_cnt,
					    jetty->rc_node.sge_shift);
	if (buf_size > 0) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = udma_dev->caps.wqe_sge_hop_num;
		idx++;
		total_buf_size += buf_size;
	}

	if (total_buf_size < 1)
		return -EINVAL;

	buf_attr->region_count = idx;
	buf_attr->page_shift = PAGE_SHIFT + udma_dev->caps.mtt_buf_pg_sz;

	return 0;
}

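/* Allocate the transport resources by mode: UM gets a ready-to-send UD QP,
 * RM only initializes the srm_node_table xarray, and RC pins the user-space
 * SQ buffer into an MTR and maps the software doorbell page.
 */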
static int alloc_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty,
			   const struct ubcore_jetty_cfg *cfg, struct ubcore_udata *udata)
{
	struct udma_create_jetty_ucmd ucmd = {};
	struct udma_buf_attr buf_attr = {};
	int ret;

	if (udata) {
		ret = copy_from_user(&ucmd, (void *)udata->udrv_data->in_addr,
				     min(udata->udrv_data->in_len,
					 (uint32_t)sizeof(ucmd)));
		if (ret) {
			dev_err(dev->dev,
				"failed to copy jetty udata, ret = %d.\n",
				ret);
			return -EFAULT;
		}
	}

	ret = set_jetty_jfr(dev, jetty, cfg, ucmd.jfr_id);
	if (ret)
		return ret;

	if (cfg->trans_mode == UBCORE_TP_UM) {
		ret = alloc_jetty_um_qp(dev, jetty, cfg, udata);
		if (ret)
			return ret;
	} else if (cfg->trans_mode == UBCORE_TP_RM) {
		xa_init(&jetty->srm_node_table);
	} else if (cfg->trans_mode == UBCORE_TP_RC) {
		jetty->rc_node.buf_addr = ucmd.buf_addr;
		ret = set_jetty_buf_attr(dev, jetty, &buf_attr);
		if (ret) {
			dev_err(dev->dev,
				"failed to set jetty buf attr, ret = %d.\n",
				ret);
			return ret;
		}

		ret = udma_mtr_create(dev, &jetty->rc_node.mtr, &buf_attr,
				      PAGE_SHIFT + dev->caps.mtt_ba_pg_sz,
				      ucmd.buf_addr, !!udata);
		if (ret) {
			dev_err(dev->dev,
				"failed to create WQE mtr for RC Jetty, ret = %d.\n",
				ret);
			return ret;
		}

		ret = udma_db_map_user(dev, ucmd.sdb_addr, &jetty->rc_node.sdb);
		if (ret) {
			dev_err(dev->dev,
				"failed to map user sdb_addr, ret = %d.\n",
				ret);
			udma_mtr_destroy(dev, &jetty->rc_node.mtr);
			return ret;
		}
	}

	return 0;
}

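/* Reserve a jetty ID from the IDA and publish the jetty in the xarray so
 * it can be looked up by ID later.
 */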
static int alloc_jetty_id(struct udma_dev *udma_dev, struct udma_jetty *jetty)
{
	struct udma_jetty_table *jetty_table = &udma_dev->jetty_table;
	struct udma_ida *jetty_ida = &jetty_table->jetty_ida;
	int ret;
	int id;

	id = ida_alloc_range(&jetty_ida->ida, jetty_ida->min, jetty_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		dev_err(udma_dev->dev, "failed to alloc jetty_id(%d).\n", id);
		return id;
	}
	jetty->jetty_id = (uint32_t)id;
	jetty->ubcore_jetty.id = jetty->jetty_id;

	ret = xa_err(xa_store(&jetty_table->xa, jetty->jetty_id, jetty,
			      GFP_KERNEL));
	if (ret) {
		dev_err(udma_dev->dev, "failed to store Jetty, ret = %d.\n",
			ret);
		ida_free(&jetty_ida->ida, id);
	}

	return ret;
}

static void free_jetty_id(struct udma_dev *udma_dev, struct udma_jetty *jetty)
{
	struct udma_jetty_table *jetty_table = &udma_dev->jetty_table;
	struct udma_ida *jetty_ida = &jetty_table->jetty_ida;

	xa_erase(&jetty_table->xa, jetty->jetty_id);
	ida_free(&jetty_ida->ida, (int)jetty->jetty_id);
}

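/* Create flow: allocate the jetty ID, set up the jetty's QPN bitmap
 * (tagged with UDMA_JETTY_QPN_PREFIX), then allocate the mode-specific
 * buffers and QP.
 */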
struct ubcore_jetty *udma_create_jetty(struct ubcore_device *dev,
				       const struct ubcore_jetty_cfg *cfg,
				       struct ubcore_udata *udata)
{
	struct udma_dev *udma_dev = to_udma_dev(dev);
	struct udma_jetty *jetty;
	int ret;

	jetty = kzalloc(sizeof(struct udma_jetty), GFP_KERNEL);
	if (!jetty)
		return NULL;

	init_jetty_cfg(jetty, cfg);
	ret = alloc_jetty_id(udma_dev, jetty);
	if (ret)
		goto err_alloc_jetty_id;

	init_jetty_x_qpn_bitmap(udma_dev, &jetty->qpn_map,
				udma_dev->caps.num_jetty_shift,
				UDMA_JETTY_QPN_PREFIX, jetty->jetty_id);

	ret = alloc_jetty_buf(udma_dev, jetty, cfg, udata);
	if (ret) {
		dev_err(udma_dev->dev, "alloc Jetty buf failed.\n");
		goto err_alloc_jetty_buf;
	}

	mutex_init(&jetty->tp_mutex);

	return &jetty->ubcore_jetty;

err_alloc_jetty_buf:
	clean_jetty_x_qpn_bitmap(&jetty->qpn_map);
	free_jetty_id(udma_dev, jetty);
err_alloc_jetty_id:
	kfree(jetty);

	return NULL;
}

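/* Mirror of alloc_jetty_buf(): reset and destroy the UD QP for UM mode,
 * or unmap the software doorbell and destroy the WQE MTR for RC mode.
 */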
int free_jetty_buf(struct udma_dev *dev, struct udma_jetty *jetty)
{
	int ret = 0;

	if (jetty->tp_mode == UBCORE_TP_UM) {
		ret = udma_modify_qp_jetty(dev, jetty, QPS_RESET);
		if (ret)
			dev_err(dev->dev,
				"modify qp(0x%llx) to RESET failed for um jetty.\n",
				jetty->qp.qpn);

		udma_destroy_qp_common(dev, &jetty->qp);
	} else if (jetty->tp_mode == UBCORE_TP_RC) {
		udma_db_unmap_user(dev, &jetty->rc_node.sdb);
		udma_mtr_destroy(dev, &jetty->rc_node.mtr);
	}

	return ret;
}

int udma_destroy_jetty(struct ubcore_jetty *jetty)
{
	struct udma_jetty *udma_jetty;
	struct udma_dev *udma_dev;
	int ret;

	udma_jetty = to_udma_jetty(jetty);
	udma_dev = to_udma_dev(jetty->ub_dev);

	ret = free_jetty_buf(udma_dev, udma_jetty);
	clean_jetty_x_qpn_bitmap(&udma_jetty->qpn_map);
	free_jetty_id(udma_dev, udma_jetty);
	kfree(udma_jetty);

	return ret;
}