Commit e94ccb1b authored by Weibo Zhao's avatar Weibo Zhao Committed by JiangShui
Browse files

hns3 udma: add register and unregister segment

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I85R2F


CVE: NA

-----------------------------------------------------

A segment is a memory region allocated by the user. The user calls
register segment to allow the hardware to read/write data in that
memory. The driver must create memory translation and protection
tables for the segment.

Signed-off-by: default avatarWeibo Zhao <zhaoweibo3@huawei.com>
parent 8d47f8c2
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/ \

$(MODULE_NAME)-objs := hns3_udma_hw.o hns3_udma_main.o hns3_udma_cmd.o \
			hns3_udma_hem.o hns3_udma_qp.o hns3_udma_eq.o \
			hns3_udma_db.o hns3_udma_jfc.o hns3_udma_jfr.o
			hns3_udma_db.o hns3_udma_jfc.o hns3_udma_jfr.o \
			hns3_udma_segment.o

obj-$(CONFIG_UB_UDMA_HNS3) := hns3_udma.o
+1 −0
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#define UDMA_JETTY_X_PREFIX_BIT_NUM	2
#define UDMA_JFR_QPN_PREFIX		0x1
#define UDMA_ADDR_4K_MASK		0xfffUL
#define URMA_SEG_ACCESS_GUARD		(1UL << 5)

enum {
	UDMA_MMAP_UAR_PAGE,
+18 −0
Original line number Diff line number Diff line
@@ -719,6 +719,19 @@ struct udma_dev {
	spinlock_t			dip_list_lock;
};

/* Driver-side state of a registered memory segment (one hardware MPT). */
struct udma_seg {
	struct ubcore_target_seg		ubcore_seg;	/* embedded ubcore object returned to the core */
	uint64_t		iova;		/* segment start virtual address (cfg->va) */
	uint64_t		size;		/* segment length in bytes (cfg->len) */
	uint32_t		key;		/* lkey: hardware MPT index << SEG_KEY_OFFSET */
	uint32_t		pd;		/* protection domain number, taken from the ucontext */
	uint32_t		access;		/* UBCORE_ACCESS_* permission bits */
	int			enabled;	/* non-zero once the MPT was created in hardware */
	uint32_t		pbl_hop_num;	/* PBL page-table hop count (from device caps) */
	struct udma_mtr		pbl_mtr;	/* multi-hop translation describing the PBL */
	uint32_t		npages;		/* number of buffer pages covered by the PBL */
};

static inline void *udma_buf_offset(struct udma_buf *buf,
				    uint32_t offset)
{
@@ -755,6 +768,11 @@ static inline struct udma_dev *to_udma_dev(const struct ubcore_device *ubcore_de
	return container_of(ubcore_dev, struct udma_dev, ub_dev);
}

/* Convert an embedded ubcore_target_seg back to its enclosing udma_seg. */
static inline struct udma_seg *to_udma_seg(struct ubcore_target_seg *seg)
{
	return container_of(seg, struct udma_seg, ubcore_seg);
}

static inline uint32_t to_udma_hem_entries_size(uint32_t count,
						uint32_t buf_shift)
{
+3 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include "hns3_udma_hem.h"
#include "hns3_udma_jfr.h"
#include "hns3_udma_jfc.h"
#include "hns3_udma_segment.h"
#include "hns3_udma_cmd.h"
static int udma_set_eid(struct ubcore_device *dev, union ubcore_eid eid)
{
@@ -179,6 +180,8 @@ static struct ubcore_ops g_udma_dev_ops = {
	.alloc_ucontext = udma_alloc_ucontext,
	.free_ucontext = udma_free_ucontext,
	.mmap = udma_mmap,
	.register_seg = udma_register_seg,
	.unregister_seg = udma_unregister_seg,
	.create_jfc = udma_create_jfc,
	.modify_jfc = udma_modify_jfc,
	.destroy_jfc = udma_destroy_jfc,
+313 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Huawei UDMA Linux driver
 * Copyright (c) 2023-2023 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include "urma/ubcore_types.h"
#include "hns3_udma_abi.h"
#include "hns3_udma_hem.h"
#include "hns3_udma_cmd.h"
#include "hns3_udma_segment.h"

static uint32_t hw_index_to_key(int ind)
{
	return ((uint32_t)ind << SEG_KEY_OFFSET);
}

/* Recover the hardware MPT index from a segment key. */
uint64_t key_to_hw_index(uint32_t key)
{
	uint64_t index = key >> SEG_KEY_OFFSET;

	return index;
}

/*
 * Post a CREATE_MPT mailbox command so the hardware installs the MPT
 * entry prepared in @mailbox->buf at @mpt_index.
 */
static int udma_hw_create_mpt(struct udma_dev *udma_dev,
			      struct udma_cmd_mailbox *mailbox,
			      uint64_t mpt_index)
{
	struct udma_cmq_desc desc;
	struct udma_mbox *mb = (struct udma_mbox *)desc.data;

	udma_cmq_setup_basic_desc(&desc, UDMA_OPC_POST_MB, false);
	mbox_desc_init(mb, mailbox->dma, 0, mpt_index, UDMA_CMD_CREATE_MPT);

	return udma_cmd_mbox(udma_dev, &desc, UDMA_CMD_TIMEOUT_MSECS, 0);
}

/*
 * Post a DESTROY_MPT mailbox command for the MPT at @mpt_index.
 * @mailbox is unused (no payload is needed for destroy) and may be NULL.
 */
int udma_hw_destroy_mpt(struct udma_dev *udma_dev,
			struct udma_cmd_mailbox *mailbox,
			uint64_t mpt_index)
{
	struct udma_cmq_desc desc;
	struct udma_mbox *mb = (struct udma_mbox *)desc.data;

	udma_cmq_setup_basic_desc(&desc, UDMA_OPC_POST_MB, false);
	mbox_desc_init(mb, 0, 0, mpt_index, UDMA_CMD_DESTROY_MPT);

	return udma_cmd_mbox(udma_dev, &desc, UDMA_CMD_TIMEOUT_MSECS, 0);
}

/*
 * Allocate a hardware MPT index for @seg, derive its key, and take a
 * reference on the corresponding MTPT table entry.
 *
 * Returns 0 on success or a negative errno; the index is released again
 * if getting the table entry fails.
 */
static int alloc_seg_key(struct udma_dev *udma_dev, struct udma_seg *seg)
{
	struct udma_ida *seg_ida = &udma_dev->seg_table.seg_ida;
	int err;
	int id;

	id = ida_alloc_range(&seg_ida->ida, seg_ida->min, seg_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		dev_err(udma_dev->dev, "failed to alloc id for MR key, id(%d)\n",
			id);
		/* propagate the real error (-ENOMEM or -ENOSPC), not a fixed -ENOMEM */
		return id;
	}

	seg->key = hw_index_to_key(id);

	err = udma_table_get(udma_dev, &udma_dev->seg_table.table,
			     (uint64_t)id);
	if (err) {
		dev_err(udma_dev->dev,
			"failed to alloc mtpt, ret = %d.\n", err);
		goto err_free_bitmap;
	}

	return 0;
err_free_bitmap:
	ida_free(&seg_ida->ida, id);
	return err;
}

/*
 * Create the page buffer list (PBL) MTR describing @seg's memory and
 * record the resulting buffer page count in @seg->npages.
 */
static int alloc_seg_pbl(struct udma_dev *udma_dev, struct udma_seg *seg,
			 bool is_user)
{
	struct udma_buf_attr buf_attr = {};
	int ret;

	seg->pbl_hop_num = udma_dev->caps.pbl_hop_num;

	/* one region covering the whole segment, full MTR (not MTT-only) */
	buf_attr.page_shift = udma_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = seg->size;
	buf_attr.region[0].hopnum = seg->pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.mtt_only = false;

	ret = udma_mtr_create(udma_dev, &seg->pbl_mtr, &buf_attr,
			      udma_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
			      seg->iova, is_user);
	if (ret) {
		dev_err(udma_dev->dev, "failed to alloc pbl mtr, ret = %d.\n",
			ret);
		return ret;
	}

	seg->npages = seg->pbl_mtr.hem_cfg.buf_pg_count;

	return 0;
}

/*
 * Look up the PBL pages of @seg and program the PBL base address, the
 * first two page addresses and the buffer page size into @mpt_entry.
 *
 * Returns 0 on success, -ENOBUFS if no PBL page could be found.
 */
static int set_mtpt_pbl(struct udma_dev *udma_dev,
			struct udma_mpt_entry *mpt_entry,
			struct udma_seg *seg)
{
	uint64_t pages[UDMA_MAX_INNER_MTPT_NUM] = {};
	uint64_t pbl_ba;
	int i, count;

	count = udma_mtr_find(udma_dev, &seg->pbl_mtr, 0, pages,
			      min_t(int, ARRAY_SIZE(pages), seg->npages),
			      &pbl_ba);
	if (count < 1) {
		dev_err(udma_dev->dev, "failed to find PBL mtr, count = %d.\n",
			count);
		return -ENOBUFS;
	}

	/* Aligned to the hardware address access unit */
	for (i = 0; i < count; i++)
		pages[i] >>= PA_PAGE_SHIFT;

	mpt_entry->pbl_size = cpu_to_le32(seg->npages);
	mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PAGE_OFFSET);
	udma_reg_write(mpt_entry, MPT_PBL_BA_H,
		       upper_32_bits(pbl_ba >> MPT_PAGE_OFFSET));
	/* pages[] is zero-initialized, so pages[1] reads 0 when count == 1 */
	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
	udma_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
	udma_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
	udma_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
		       to_hr_hw_page_shift(seg->pbl_mtr.hem_cfg.buf_pg_shift));

	return 0;
}

/*
 * Fill the hardware MPT entry in the mailbox buffer @mb_buf from @seg:
 * state, PD, access permissions, length, key, VA and the PBL addresses.
 */
static int udma_write_seg_mpt(struct udma_dev *udma_dev,
			      void *mb_buf, struct udma_seg *seg)
{
	struct udma_mpt_entry *mpt_entry = (struct udma_mpt_entry *)mb_buf;

	memset(mpt_entry, 0, sizeof(*mpt_entry));

	udma_reg_write(mpt_entry, MPT_ST, MPT_ST_VALID);
	udma_reg_write(mpt_entry, MPT_PD, seg->pd);
	udma_reg_enable(mpt_entry, MPT_L_INV_EN);

	/* translate ubcore access flags into MPT permission bits */
	udma_reg_write(mpt_entry, MPT_RW_EN,
		       !!(seg->access & UBCORE_ACCESS_REMOTE_WRITE));
	udma_reg_write(mpt_entry, MPT_LW_EN,
		       !!(seg->access & UBCORE_ACCESS_LOCAL_WRITE));
	udma_reg_write(mpt_entry, MPT_R_INV_EN,
		       !!(seg->access & UBCORE_ACCESS_REMOTE_INVALIDATE));

	mpt_entry->len_l = cpu_to_le32(lower_32_bits(seg->size));
	mpt_entry->len_h = cpu_to_le32(upper_32_bits(seg->size));
	mpt_entry->lkey = cpu_to_le32(seg->key);
	mpt_entry->va_l = cpu_to_le32(lower_32_bits(seg->iova));
	mpt_entry->va_h = cpu_to_le32(upper_32_bits(seg->iova));

	udma_reg_write(mpt_entry, MPT_PERSIST_EN, 1);

	if (seg->pbl_hop_num != UDMA_HOP_NUM_0)
		udma_reg_write(mpt_entry, MPT_PBL_HOP_NUM, seg->pbl_hop_num);

	udma_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
		       to_hr_hw_page_shift(seg->pbl_mtr.hem_cfg.ba_pg_shift));
	udma_reg_enable(mpt_entry, MPT_INNER_PA_VLD);

	return set_mtpt_pbl(udma_dev, mpt_entry, seg);
}

/*
 * Write @seg's MPT entry into a command mailbox and ask the hardware to
 * create the MPT. The mailbox is freed on every path.
 */
static int udma_seg_enable(struct udma_dev *udma_dev, struct udma_seg *seg)
{
	struct udma_cmd_mailbox *mailbox;
	struct device *dev = udma_dev->dev;
	uint64_t mpt_index;
	int ret;

	mailbox = udma_alloc_cmd_mailbox(udma_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = udma_write_seg_mpt(udma_dev, mailbox->buf, seg);
	if (ret) {
		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
		goto out;
	}

	/* hardware indexes MPTs modulo the MPT table size */
	mpt_index = key_to_hw_index(seg->key) &
		    (udma_dev->caps.num_mtpts - 1);
	ret = udma_hw_create_mpt(udma_dev, mailbox, mpt_index);
	if (ret)
		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);

out:
	udma_free_cmd_mailbox(udma_dev, mailbox);

	return ret;
}

/* Destroy the segment's PBL multi-hop translation resources. */
static void free_seg_pbl(struct udma_dev *udma_dev, struct udma_seg *seg)
{
	udma_mtr_destroy(udma_dev, &seg->pbl_mtr);
}

/* Drop the MTPT table reference and release the hardware MPT index. */
static void free_seg_key(struct udma_dev *udma_dev, struct udma_seg *seg)
{
	uint64_t index = key_to_hw_index(seg->key);

	udma_table_put(udma_dev, &udma_dev->seg_table.table, index);
	ida_free(&udma_dev->seg_table.seg_ida.ida, (int)index);
}

/*
 * ubcore callback: register a user memory segment. Validates the access
 * flags, allocates a key/MPT index, builds the PBL translation and
 * enables the MPT in hardware.
 *
 * Returns the embedded ubcore_target_seg on success, NULL on failure.
 * The udma_seg is owned by the driver until udma_unregister_seg().
 */
struct ubcore_target_seg *udma_register_seg(struct ubcore_device *dev,
				 const struct ubcore_seg_cfg *cfg,
				 struct ubcore_udata *udata)
{
	struct udma_dev *udma_dev = to_udma_dev(dev);
	struct udma_ucontext *udma_ctx;
	struct udma_seg *seg;
	int ret;

	if (cfg->flag.bs.access >= URMA_SEG_ACCESS_GUARD) {
		dev_err(udma_dev->dev, "Invalid segment access 0x%x.\n",
			cfg->flag.bs.access);
		return NULL;
	}

	/*
	 * The PD comes from the user context; fail cleanly instead of
	 * dereferencing a NULL udata (the later "!!udata" implies udata
	 * may legally be NULL, but this path needs a user context).
	 */
	if (!udata || !udata->uctx) {
		dev_err(udma_dev->dev, "Invalid udata for segment register.\n");
		return NULL;
	}

	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return NULL;

	udma_ctx = to_udma_ucontext(udata->uctx);
	seg->iova = cfg->va;
	seg->size = cfg->len;
	seg->pd = udma_ctx->pdn;
	seg->access = cfg->flag.bs.access;

	ret = alloc_seg_key(udma_dev, seg);
	if (ret)
		goto err_alloc_key;

	ret = alloc_seg_pbl(udma_dev, seg, !!udata);
	if (ret)
		goto err_alloc_pbl;

	ret = udma_seg_enable(udma_dev, seg);
	if (ret)
		goto err_enable_seg;

	seg->enabled = 1;
	seg->ubcore_seg.seg.key_id = seg->key;

	return &seg->ubcore_seg;

err_enable_seg:
	free_seg_pbl(udma_dev, seg);
err_alloc_pbl:
	free_seg_key(udma_dev, seg);
err_alloc_key:
	kfree(seg);
	return NULL;
}

void udma_seg_free(struct udma_dev *udma_dev, struct udma_seg *seg)
{
	int ret;

	if (seg->enabled) {
		ret = udma_hw_destroy_mpt(udma_dev, NULL,
					  key_to_hw_index(seg->key) &
					  (udma_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(udma_dev->dev, "failed to destroy mpt, ret = %d.\n",
				 ret);
	}

	free_seg_pbl(udma_dev, seg);
	free_seg_key(udma_dev, seg);
}

/* ubcore callback: release all resources of a registered segment. */
int udma_unregister_seg(struct ubcore_target_seg *seg)
{
	struct udma_seg *udma_seg = to_udma_seg(seg);
	struct udma_dev *udma_dev = to_udma_dev(seg->ub_dev);

	udma_seg_free(udma_dev, udma_seg);
	kfree(udma_seg);

	return 0;
}
Loading