Unverified Commit d0fa1db5 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!6660 udma: fix bugs of record_db

Merge Pull Request from: @zhaoweibo3 
 
These patches fix the following problems:
1. Fix bugs in record_db: doorbell page addresses should be isolated per process (per user context), not shared device-wide.
2. Add the missing function declarations in user-ctrl.
3. Unify the coding style (rename UDMA_* identifiers to the HNS3_UDMA_* prefix).

https://gitee.com/openeuler/kernel/issues/I8V1IQ 
 
Link: https://gitee.com/openeuler/kernel/pulls/6660

 

Reviewed-by: default avatarChunzhi Hu <huchunzhi@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents ca174c5e 33e5598b
Loading
Loading
Loading
Loading
+62 −62
Original line number Diff line number Diff line
@@ -13,48 +13,48 @@
 *
 */

#ifndef _UDMA_ABI_H
#define _UDMA_ABI_H
#ifndef _HNS3_UDMA_ABI_H
#define _HNS3_UDMA_ABI_H

#include <linux/types.h>

#define MAP_COMMAND_MASK		0xff
#define MAP_INDEX_MASK			0xffffff
#define MAP_INDEX_SHIFT			8
#define UDMA_DWQE_PAGE_SIZE		65536
#define UDMA_JETTY_X_PREFIX_BIT_NUM	2
#define UDMA_JFS_QPN_PREFIX		0x2
#define UDMA_JFR_QPN_PREFIX		0x1
#define UDMA_JETTY_QPN_PREFIX		0x3
#define UDMA_ADDR_4K_MASK		0xfffUL
#define URMA_SEG_ACCESS_GUARD		(1UL << 5)
#define UDMA_DCA_ATTACH_FLAGS_NEW_BUFFER BIT(0)
#define UDMA_DCA_INVALID_DCA_NUM ~0U
#define HNS3_UDMA_MAP_COMMAND_MASK		0xff
#define HNS3_UDMA_MAP_INDEX_MASK		0xffffff
#define HNS3_UDMA_MAP_INDEX_SHIFT		8
#define HNS3_UDMA_DWQE_PAGE_SIZE		65536
#define HNS3_UDMA_JETTY_X_PREFIX_BIT_NUM	2
#define HNS3_UDMA_JFS_QPN_PREFIX		0x2
#define HNS3_UDMA_JFR_QPN_PREFIX		0x1
#define HNS3_UDMA_JETTY_QPN_PREFIX		0x3
#define HNS3_UDMA_ADDR_4K_MASK			0xfffUL
#define HNS3_URMA_SEG_ACCESS_GUARD		(1UL << 5)
#define HNS3_UDMA_DCA_ATTACH_FLAGS_NEW_BUFFER	BIT(0)
#define HNS3_UDMA_DCA_INVALID_DCA_NUM		~0U

enum {
	UDMA_MMAP_UAR_PAGE,
	UDMA_MMAP_DWQE_PAGE,
	UDMA_MMAP_RESET_PAGE,
	UDMA_MMAP_TYPE_DCA
	HNS3_UDMA_MMAP_UAR_PAGE,
	HNS3_UDMA_MMAP_DWQE_PAGE,
	HNS3_UDMA_MMAP_RESET_PAGE,
	HNS3_UDMA_MMAP_TYPE_DCA,
};

enum udma_jfc_init_attr_mask {
	UDMA_JFC_NOTIFY_OR_POE_CREATE_FLAGS = 1 << 0,
enum hns3_udma_jfc_init_attr_mask {
	HNS3_UDMA_JFC_NOTIFY_OR_POE_CREATE_FLAGS = 1 << 0,
};

enum udma_jfc_create_flags {
	UDMA_JFC_CREATE_ENABLE_POE_MODE = 1 << 0,
	UDMA_JFC_CREATE_ENABLE_NOTIFY = 1 << 1,
enum hns3_udma_jfc_create_flags {
	HNS3_UDMA_JFC_CREATE_ENABLE_POE_MODE = 1 << 0, /* conflict with notify */
	HNS3_UDMA_JFC_CREATE_ENABLE_NOTIFY = 1 << 1,
};

enum udma_jfc_notify_mode {
	UDMA_JFC_NOTIFY_MODE_64B_ALIGN,
	UDMA_JFC_NOTIFY_MODE_4B_ALIGN,
	UDMA_JFC_NOTIFY_MODE_DDR_64B_ALIGN,
	UDMA_JFC_NOTIFY_MODE_DDR_4B_ALIGN,
enum hns3_udma_jfc_notify_mode {
	HNS3_UDMA_JFC_NOTIFY_MODE_64B_ALIGN,
	HNS3_UDMA_JFC_NOTIFY_MODE_4B_ALIGN,
	HNS3_UDMA_JFC_NOTIFY_MODE_DDR_64B_ALIGN,
	HNS3_UDMA_JFC_NOTIFY_MODE_DDR_4B_ALIGN,
};

struct udma_create_jfr_ucmd {
struct hns3_udma_create_jfr_ucmd {
	uint64_t buf_addr;
	uint64_t idx_addr;
	uint64_t db_addr;
@@ -66,38 +66,38 @@ struct udma_create_jfr_ucmd {
	bool     share_jfr;
};

enum udma_jfr_cap_flags {
	UDMA_JFR_CAP_RECORD_DB = 1 << 0,
enum hns3_udma_jfr_cap_flags {
	HNS3_UDMA_JFR_CAP_RECORD_DB = 1 << 0,
};

struct udma_create_jfr_resp {
struct hns3_udma_create_jfr_resp {
	uint32_t jfr_caps;
	uint32_t srqn;
};

struct udma_jfc_attr_ex {
	uint64_t	jfc_ex_mask; /* Use enum udma_jfc_init_attr_mask */
	uint64_t	create_flags; /* Use enum udma_jfc_create_flags */
struct hns3_udma_jfc_attr_ex {
	uint64_t	jfc_ex_mask; /* Use enum hns3_udma_jfc_init_attr_mask */
	uint64_t	create_flags; /* Use enum hns3_udma_jfc_create_flags */
	uint64_t	notify_addr;
	uint8_t		poe_channel; /* poe channel to use */
	uint8_t		notify_mode; /* Use enum udma_jfc_notify_mode */
	uint8_t		notify_mode; /* Use enum hns3_udma_jfc_notify_mode */
};

struct udma_create_jfc_ucmd {
struct hns3_udma_create_jfc_ucmd {
	uint64_t		buf_addr;
	uint64_t		db_addr;
	struct udma_jfc_attr_ex	jfc_attr_ex;
	struct hns3_udma_jfc_attr_ex	jfc_attr_ex;
};

enum udma_jfc_cap_flags {
	UDMA_JFC_CAP_RECORD_DB = 1 << 0,
enum hns3_udma_jfc_cap_flags {
	HNS3_UDMA_JFC_CAP_RECORD_DB = 1 << 0,
};

struct udma_create_jfc_resp {
struct hns3_udma_create_jfc_resp {
	uint32_t jfc_caps;
};

struct udma_create_tp_ucmd {
struct hns3_udma_create_tp_ucmd {
	bool			is_jetty;
	union {
		uint32_t	jfs_id;
@@ -113,20 +113,20 @@ struct udma_create_tp_ucmd {
	uint64_t		sdb_addr;
};

struct udma_create_jetty_ucmd {
	struct udma_create_tp_ucmd	create_tp_ucmd;
struct hns3_udma_create_jetty_ucmd {
	struct hns3_udma_create_tp_ucmd	create_tp_ucmd;
	uint32_t			jfr_id;
	uint32_t			srqn;
	uint64_t			buf_addr;
	uint64_t			sdb_addr;
};

enum udma_qp_cap_flags {
	UDMA_QP_CAP_RQ_RECORD_DB = 1 << 0,
	UDMA_QP_CAP_SQ_RECORD_DB = 1 << 1,
	UDMA_QP_CAP_OWNER_DB = 1 << 2,
	UDMA_QP_CAP_DYNAMIC_CTX_ATTACH = 1 << 4,
	UDMA_QP_CAP_DIRECT_WQE = 1 << 5,
enum hns3_udma_qp_cap_flags {
	HNS3_UDMA_QP_CAP_RQ_RECORD_DB = 1 << 0,
	HNS3_UDMA_QP_CAP_SQ_RECORD_DB = 1 << 1,
	HNS3_UDMA_QP_CAP_OWNER_DB = 1 << 2,
	HNS3_UDMA_QP_CAP_DYNAMIC_CTX_ATTACH = 1 << 4,
	HNS3_UDMA_QP_CAP_DIRECT_WQE = 1 << 5,
};

struct udp_srcport {
@@ -135,7 +135,7 @@ struct udp_srcport {
	uint8_t		um_udp_range;
};

struct udma_create_tp_resp {
struct hns3_udma_create_tp_resp {
	uint64_t		cap_flags;
	uint32_t		qpn;
	uint32_t		path_mtu;
@@ -143,32 +143,32 @@ struct udma_create_tp_resp {
	uint8_t			priority;
};

struct udma_create_jetty_resp {
	struct udma_create_tp_resp create_tp_resp;
struct hns3_udma_create_jetty_resp {
	struct hns3_udma_create_tp_resp create_tp_resp;
};

struct udma_create_jfs_ucmd {
	struct udma_create_tp_ucmd create_tp_ucmd;
struct hns3_udma_create_jfs_ucmd {
	struct hns3_udma_create_tp_ucmd create_tp_ucmd;
};

struct udma_create_jfs_resp {
	struct udma_create_tp_resp create_tp_resp;
struct hns3_udma_create_jfs_resp {
	struct hns3_udma_create_tp_resp create_tp_resp;
};

struct udma_create_ctx_ucmd {
struct hns3_udma_create_ctx_ucmd {
	uint32_t comp;
	uint32_t dca_max_qps;
	uint32_t dca_unit_size;
};

enum udma_context_comp_mask {
enum hns3_udma_context_comp_mask {
	UDMA_CONTEXT_MASK_DCA_PRIME_QPS = 1 << 0,
	UDMA_CONTEXT_MASK_DCA_UNIT_SIZE = 1 << 1,
	UDMA_CONTEXT_MASK_DCA_MAX_SIZE = 1 << 2,
	UDMA_CONTEXT_MASK_DCA_MIN_SIZE = 1 << 3,
};

struct udma_create_ctx_resp {
struct hns3_udma_create_ctx_resp {
	uint32_t num_comp_vectors;
	uint32_t num_qps_shift;
	uint32_t num_jfs_shift;
@@ -250,7 +250,7 @@ struct udma_dca_query_resp {
	uint32_t	page_count;
};

enum udma_user_ctl_handlers {
enum hns3_udma_user_ctl_handlers {
	UDMA_USER_CTL_FLUSH_CQE,
	UDMA_CONFIG_POE_CHANNEL,
	UDMA_QUERY_POE_CHANNEL,
@@ -263,4 +263,4 @@ enum udma_user_ctl_handlers {
	UDMA_OPCODE_NUM,
};

#endif /* _UDMA_ABI_H */
#endif /* _HNS3_UDMA_ABI_H */
+2 −2
Original line number Diff line number Diff line
@@ -284,13 +284,13 @@ static void dump_desc(struct udma_dev *dev,
	    ((desc->data[SUB_OPCODE_IDX] & 0xFF) ==
	     UDMA_CMD_WRITE_CQC_TIMER_BT0))
		dev_err_ratelimited(dev->dev,
			"Send cmd opcode:0x%4x, data: %08x %08x %08x %08x %08x %08x, mlbox: %08x\n",
			"send cmd opcode:0x%4x, data: %08x %08x %08x %08x %08x %08x, mlbox: %08x\n",
			desc->opcode, desc->data[0], desc->data[1],
			desc->data[2], desc->data[3], desc->data[4],
			desc->data[5], num_mailbox);
	else
		dev_info_ratelimited(dev->dev,
			"Send cmd opcode:0x%4x, data: %08x %08x %08x %08x %08x %08x, mlbox: %08x\n",
			"send cmd opcode:0x%4x, data: %08x %08x %08x %08x %08x %08x, mlbox: %08x\n",
			desc->opcode, desc->data[0], desc->data[1],
			desc->data[2], desc->data[3], desc->data[4],
			desc->data[5], num_mailbox);
+9 −8
Original line number Diff line number Diff line
@@ -19,18 +19,19 @@
#include "hns3_udma_device.h"
#include "hns3_udma_db.h"

int udma_db_map_user(struct udma_dev *udma_dev, uint64_t virt,
int udma_db_map_user(struct udma_ucontext *udma_ctx, uint64_t virt,
		     struct udma_db *db)
{
	struct udma_dev *udma_dev = to_udma_dev(udma_ctx->uctx.ub_dev);
	uint64_t page_addr = virt & PAGE_MASK;
	union ubcore_umem_flag access = {};
	struct udma_user_db_page *db_page;
	uint32_t offset;
	int ret = 0;

	mutex_lock(&udma_dev->pgdir_mutex);
	mutex_lock(&udma_ctx->pgdir_mutex);

	list_for_each_entry(db_page, &udma_dev->pgdir_list, list) {
	list_for_each_entry(db_page, &udma_ctx->pgdir_list, list) {
		if (db_page->user_virt == page_addr)
			goto found;
	}
@@ -53,7 +54,7 @@ int udma_db_map_user(struct udma_dev *udma_dev, uint64_t virt,
		goto out;
	}

	list_add(&db_page->list, &udma_dev->pgdir_list);
	list_add(&db_page->list, &udma_ctx->pgdir_list);

found:
	offset = virt - page_addr;
@@ -63,14 +64,14 @@ int udma_db_map_user(struct udma_dev *udma_dev, uint64_t virt,
	refcount_inc(&db_page->refcount);

out:
	mutex_unlock(&udma_dev->pgdir_mutex);
	mutex_unlock(&udma_ctx->pgdir_mutex);

	return ret;
}

void udma_db_unmap_user(struct udma_dev *udma_dev, struct udma_db *db)
void udma_db_unmap_user(struct udma_ucontext *udma_ctx, struct udma_db *db)
{
	mutex_lock(&udma_dev->pgdir_mutex);
	mutex_lock(&udma_ctx->pgdir_mutex);

	refcount_dec(&db->user_page->refcount);
	if (refcount_dec_if_one(&db->user_page->refcount)) {
@@ -79,5 +80,5 @@ void udma_db_unmap_user(struct udma_dev *udma_dev, struct udma_db *db)
		kfree(db->user_page);
	}

	mutex_unlock(&udma_dev->pgdir_mutex);
	mutex_unlock(&udma_ctx->pgdir_mutex);
}
+2 −2
Original line number Diff line number Diff line
@@ -18,9 +18,9 @@

#include "hns3_udma_device.h"

int udma_db_map_user(struct udma_dev *udma_dev, uint64_t virt,
int udma_db_map_user(struct udma_ucontext *udma_ctx, uint64_t virt,
		     struct udma_db *db);

void udma_db_unmap_user(struct udma_dev *udma_dev, struct udma_db *db);
void udma_db_unmap_user(struct udma_ucontext *udma_ctx, struct udma_db *db);

#endif /* _UDMA_DB_H */
+9 −9
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@ void udma_enable_dca(struct udma_dev *dev, struct udma_qp *qp)
	INIT_LIST_HEAD(&cfg->aging_node);
	cfg->buf_id = UDMA_DCA_INVALID_BUF_ID;
	cfg->npages = qp->buff_size >> UDMA_HW_PAGE_SHIFT;
	cfg->dcan = UDMA_DCA_INVALID_DCA_NUM;
	cfg->dcan = HNS3_UDMA_DCA_INVALID_DCA_NUM;
}

static void stop_aging_dca_mem(struct udma_dca_ctx *ctx,
@@ -181,11 +181,11 @@ static void kick_dca_buf(struct udma_dev *dev, struct udma_dca_cfg *cfg,

static void free_dca_num(struct udma_dca_cfg *cfg, struct udma_dca_ctx *ctx)
{
	if (cfg->dcan == UDMA_DCA_INVALID_DCA_NUM)
	if (cfg->dcan == HNS3_UDMA_DCA_INVALID_DCA_NUM)
		return;

	ida_free(&ctx->ida, cfg->dcan);
	cfg->dcan = UDMA_DCA_INVALID_DCA_NUM;
	cfg->dcan = HNS3_UDMA_DCA_INVALID_DCA_NUM;
}

void udma_disable_dca(struct udma_dev *dev, struct udma_qp *qp)
@@ -317,7 +317,7 @@ static uint32_t alloc_dca_num(struct udma_dca_ctx *ctx)

	ret = ida_alloc_range(&ctx->ida, 0, ctx->max_qps - 1, GFP_KERNEL);
	if (ret < 0)
		return UDMA_DCA_INVALID_DCA_NUM;
		return HNS3_UDMA_DCA_INVALID_DCA_NUM;

	stop_free_dca_buf(ctx, ret);
	update_dca_buf_status(ctx, ret, false);
@@ -418,7 +418,7 @@ static void unregister_dca_mem(struct udma_dev *dev, struct udma_dca_ctx *ctx,
}

static uint32_t get_udca_max_qps(struct udma_dev *udma_dev,
				 struct udma_create_ctx_ucmd *ucmd)
				 struct hns3_udma_create_ctx_ucmd *ucmd)
{
	uint32_t qp_num = 0;

@@ -450,7 +450,7 @@ static int udma_query_qpc(struct udma_dev *udma_dev, uint32_t qpn,
	int ret;

	mailbox = udma_alloc_cmd_mailbox(udma_dev);
	if (IS_ERR_OR_NULL(mailbox)) {
	if (IS_ERR(mailbox)) {
		dev_err(udma_dev->dev, "alloc mailbox failed\n");
		ret = PTR_ERR(mailbox);
		goto alloc_mailbox_fail;
@@ -599,12 +599,12 @@ int udma_register_udca(struct udma_dev *udma_dev,
		       struct udma_ucontext *context, struct ubcore_udrv_priv *udrv_data)
{
	struct udma_dca_ctx *dca_ctx = &context->dca_ctx;
	struct udma_create_ctx_ucmd ucmd = {};
	struct hns3_udma_create_ctx_ucmd ucmd = {};
	int max_qps;
	int ret;

	ret = copy_from_user(&ucmd, (void *)udrv_data->in_addr,
			     min(udrv_data->in_len, (uint32_t)sizeof(ucmd)));
			     min_t(uint32_t, udrv_data->in_len, (uint32_t)sizeof(ucmd)));
	if (ret) {
		dev_err(udma_dev->dev, "Failed to copy udata, ret = %d.\n",
			ret);
@@ -1157,7 +1157,7 @@ int udma_dca_attach(struct udma_dev *dev, struct udma_dca_attach_attr *attr,
	cfg->attach_count++;
	spin_unlock(&cfg->lock);

	resp->alloc_flags |= UDMA_DCA_ATTACH_FLAGS_NEW_BUFFER;
	resp->alloc_flags |= HNS3_UDMA_DCA_ATTACH_FLAGS_NEW_BUFFER;
	resp->alloc_pages = cfg->npages;
	resp->dcan = cfg->dcan;
	update_dca_buf_status(ctx, cfg->dcan, true);
Loading