Commit 9c9b2869 authored by Weibo Zhao's avatar Weibo Zhao Committed by JiangShui
Browse files

hns3 udma: cmd and mailbox for hns3-udma driver

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I85R2F


CVE: NA

------------------------------------------------

This patch adds the cmd and mailbox feature for the
hns3-udma driver.

There are two cmd modes: polling mode and event
mode.

Signed-off-by: default avatarWeibo Zhao <zhaoweibo3@huawei.com>
parent c256e44d
Loading
Loading
Loading
Loading
+296 −0
Original line number Diff line number Diff line
@@ -43,6 +43,75 @@ void udma_cmd_cleanup(struct udma_dev *udma_dev)
	up_write(&udma_dev->cmd.udma_mb_rwsem);
}

/*
 * udma_cmd_use_events() - switch the command interface to event mode.
 * @udma_dev: udma device.
 *
 * Allocates one udma_cmd_context per outstanding command and links them
 * into a circular free list threaded through ->next.
 *
 * Return: 0 on success or -ENOMEM if the context array cannot be allocated.
 */
int udma_cmd_use_events(struct udma_dev *udma_dev)
{
	struct udma_cmdq *udma_cmd = &udma_dev->cmd;
	int i;

	/* kcalloc() returns void *, so no cast is needed in C. */
	udma_cmd->context = kcalloc(udma_cmd->max_cmds,
				    sizeof(*udma_cmd->context), GFP_KERNEL);
	if (!udma_cmd->context)
		return -ENOMEM;

	for (i = 0; i < udma_cmd->max_cmds; ++i) {
		udma_cmd->context[i].token = i;
		udma_cmd->context[i].next = i + 1;
		init_completion(&udma_cmd->context[i].done);
	}
	/* Close the ring: the last entry points back to the first. */
	udma_cmd->context[udma_cmd->max_cmds - 1].next = 0;
	udma_cmd->free_head = 0;

	/* Bound concurrent event-mode commands by the number of contexts. */
	sema_init(&udma_cmd->event_sem, udma_cmd->max_cmds);
	spin_lock_init(&udma_cmd->ctx_lock);

	udma_cmd->use_events = 1;

	return 0;
}

void udma_cmd_use_polling(struct udma_dev *udma_dev)
{
	struct udma_cmdq *udma_cmd = &udma_dev->cmd;

	kfree(udma_cmd->context);
	udma_cmd->use_events = 0;
}

/*
 * udma_alloc_cmd_mailbox() - allocate a DMA-able mailbox buffer.
 * @dev: udma device.
 *
 * On success the read side of cmd.udma_mb_rwsem is held and is only
 * released by the matching udma_free_cmd_mailbox() call.
 *
 * Return: the mailbox, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct udma_cmd_mailbox *udma_alloc_cmd_mailbox(struct udma_dev *dev)
{
	struct udma_cmd_mailbox *mbox = kzalloc(sizeof(*mbox), GFP_KERNEL);

	if (!mbox)
		return ERR_PTR(-ENOMEM);

	down_read(&dev->cmd.udma_mb_rwsem);

	mbox->buf = dma_pool_zalloc(dev->cmd.pool, GFP_KERNEL, &mbox->dma);
	if (mbox->buf)
		return mbox;

	/* Pool allocation failed: drop the lock taken above and bail out. */
	up_read(&dev->cmd.udma_mb_rwsem);
	kfree(mbox);
	return ERR_PTR(-ENOMEM);
}

/*
 * udma_free_cmd_mailbox() - release a mailbox from udma_alloc_cmd_mailbox().
 * @dev: udma device.
 * @mailbox: mailbox to free; NULL is a no-op.
 *
 * A NULL mailbox means the allocation never succeeded, so no lock is held
 * and nothing needs freeing.
 */
void udma_free_cmd_mailbox(struct udma_dev *dev,
			   struct udma_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);

	/* Pairs with the down_read() in udma_alloc_cmd_mailbox(). */
	up_read(&dev->cmd.udma_mb_rwsem);

	kfree(mailbox);
}

static uint32_t udma_cmd_hw_reseted(struct udma_dev *dev,
				    uint64_t instance_stage,
				    uint64_t reset_stage)
@@ -300,3 +369,230 @@ int udma_cmq_send(struct udma_dev *dev, struct udma_cmq_desc *desc, int num)

	return ret;
}

/*
 * udma_wait_mbox_complete() - poll hardware until the mailbox is idle.
 * @dev: udma device.
 * @timeout: maximum time to wait, in milliseconds.
 * @complete_status: on success, the completion status (low 8 bits of the
 *		     queried status word); set to MB_ST_COMPLETE_M when the
 *		     mbox is unavailable and errors are being ignored.
 *
 * Return: 0 when idle (or when the mbox is unavailable but not busy),
 * -ETIMEDOUT on timeout, -EBUSY while still busy, or the error returned
 * by the status query.
 */
static int udma_wait_mbox_complete(struct udma_dev *dev, uint32_t timeout,
				   uint8_t *complete_status)
{
	struct udma_mbox_status *mb_st;
	struct udma_cmq_desc desc;
	unsigned long end;
	int ret = -EBUSY;
	uint32_t status;
	bool busy;

	mb_st = (struct udma_mbox_status *)desc.data;
	end = msecs_to_jiffies(timeout) + jiffies;
	/* Stop polling as soon as the mbox becomes unavailable. */
	while (udma_chk_mbox_is_avail(dev, &busy)) {
		status = 0;
		udma_cmq_setup_basic_desc(&desc, UDMA_OPC_QUERY_MB_ST,
					  true);
		ret = __udma_cmq_send(dev, &desc, 1);
		if (!ret) {
			status = le32_to_cpu(mb_st->mb_status_hw_run);
			/* No pending message exists in UDMA mbox. */
			if (!(status & MB_ST_HW_RUN_M))
				break;
		} else if (!udma_chk_mbox_is_avail(dev, &busy)) {
			/* Query failed and the mbox has gone away: bail. */
			break;
		}

		if (time_after(jiffies, end)) {
			dev_err_ratelimited(dev->dev,
					    "failed to wait mbox status 0x%x\n",
					    status);
			return -ETIMEDOUT;
		}

		cond_resched();
		/* A successful query that still shows HW_RUN stays busy. */
		ret = -EBUSY;
	}

	if (!ret) {
		*complete_status = (uint8_t)(status & MB_ST_COMPLETE_M);
	} else if (!udma_chk_mbox_is_avail(dev, &busy)) {
		/* Ignore all errors if the mbox is unavailable. */
		ret = busy ? -EBUSY : 0;
		*complete_status = MB_ST_COMPLETE_M;
	}

	return ret;
}

/* Fill in the token/event field of the mailbox and send the descriptor. */
static int __udma_post_mbox(struct udma_dev *dev, struct udma_cmq_desc *desc,
			    uint16_t token, int vfid_event)
{
	struct udma_mbox *mb = (struct udma_mbox *)desc->data;
	uint32_t token_event;

	/* vfid/event-enable in the high half, command token in the low. */
	token_event = ((uint32_t)vfid_event << UDMA_MB_EVENT_EN_SHIFT) | token;
	mb->token_event_en = cpu_to_le32(token_event);

	return udma_cmq_send(dev, desc, 1);
}

/*
 * udma_post_mbox() - post a mailbox command once the hardware is idle.
 * @dev: udma device.
 * @desc: descriptor carrying the mailbox payload.
 * @token: token identifying this command.
 * @vfid_event: packed vfid/event-enable value.
 *
 * Return: 0 on success or a negative errno.
 */
int udma_post_mbox(struct udma_dev *dev, struct udma_cmq_desc *desc,
		   uint16_t token, int vfid_event)
{
	uint8_t status = 0;
	int ret;

	/* Wait until no previous mailbox command is still running. */
	ret = udma_wait_mbox_complete(dev, UDMA_GO_BIT_TIMEOUT_MSECS, &status);
	if (unlikely(ret)) {
		dev_err_ratelimited(dev->dev,
				    "failed to check post mbox status = 0x%x, ret = %d.\n",
				    status, ret);
		return ret;
	}

	/* Hardware mailbox is idle: hand over the new command. */
	ret = __udma_post_mbox(dev, desc, token, vfid_event);
	if (ret)
		dev_err_ratelimited(dev->dev,
				    "failed to post mailbox, ret = %d.\n", ret);

	return ret;
}

/*
 * udma_poll_mbox_done() - wait for the last mailbox to finish (poll mode).
 * @dev: udma device.
 * @timeout: maximum wait, in milliseconds.
 *
 * Return: 0 on successful completion, -EBUSY if the hardware reported a
 * non-success status, or the error from the status wait.
 */
int udma_poll_mbox_done(struct udma_dev *dev, uint32_t timeout)
{
	uint8_t status = 0;
	int ret;

	ret = udma_wait_mbox_complete(dev, timeout, &status);
	if (ret) {
		dev_err_ratelimited(dev->dev,
				    "failed to check mbox status = 0x%x, ret = %d.\n",
				    status, ret);
		return ret;
	}

	return (status == MB_ST_COMPLETE_SUCC) ? 0 : -EBUSY;
}

/*
 * Dispatch a mailbox post through the hw ops table (set to
 * udma_post_mbox() in this driver's udma_hw definition).
 */
static int udma_cmd_mbox_post_hw(struct udma_dev *dev,
				 struct udma_cmq_desc *desc,
				 uint16_t token, int vfid_event)
{
	return dev->hw->post_mbox(dev, desc, token, vfid_event);
}

/* Post a mailbox with the poll token and busy-wait for its completion. */
static int __udma_cmd_mbox_poll(struct udma_dev *dev,
				struct udma_cmq_desc *desc,
				uint32_t timeout, int vfid)
{
	struct udma_mbox *mb = (struct udma_mbox *)desc->data;
	int op = le32_to_cpu(mb->cmd_tag) & 0xff;	/* opcode, for logs */
	int ret;

	/* Poll mode leaves the event-enable bit (bit 0) clear. */
	ret = udma_cmd_mbox_post_hw(dev, desc, CMD_POLL_TOKEN, vfid << 1);
	if (ret) {
		dev_err_ratelimited(dev->dev,
				    "failed to post mailbox %x in poll mode, ret = %d.\n",
				    op, ret);
		return ret;
	}

	return dev->hw->poll_mbox_done(dev, timeout);
}

/* Poll-mode entry: poll_sem serializes poll-mode mailbox users. */
static int udma_cmd_mbox_poll(struct udma_dev *dev, struct udma_cmq_desc *desc,
			      uint32_t timeout, int vfid)
{
	int result;

	down(&dev->cmd.poll_sem);
	result = __udma_cmd_mbox_poll(dev, desc, timeout, vfid);
	up(&dev->cmd.poll_sem);

	return result;
}

/*
 * udma_cmd_event() - completion handler for an event-mode mailbox.
 * @udma_dev: udma device.
 * @token: token carried by the asynchronous event.
 * @status: hardware status; CMD_RST_PRC_SUCCESS means success.
 * @out_param: output value reported by hardware.
 *
 * Looks up the waiting context by token and wakes the waiter in
 * __udma_cmd_mbox_wait().
 */
void udma_cmd_event(struct udma_dev *udma_dev, uint16_t token, uint8_t status,
		    uint64_t out_param)
{
	struct udma_cmdq *cmd = &udma_dev->cmd;
	struct udma_cmd_context *ctx = &cmd->context[token % cmd->max_cmds];

	/* A mismatched token means the event is stale; drop it. */
	if (unlikely(ctx->token != token)) {
		dev_err_ratelimited(udma_dev->dev,
				    "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
				    token, ctx->token);
		return;
	}

	if (status == CMD_RST_PRC_SUCCESS)
		ctx->result = 0;
	else
		ctx->result = -EIO;
	ctx->out_param = out_param;
	complete(&ctx->done);
}

/*
 * __udma_cmd_mbox_wait() - post a mailbox in event mode and wait for its
 * asynchronous completion event.
 * @udma_dev: udma device.
 * @desc: descriptor carrying the mailbox payload.
 * @timeout: completion wait limit, in milliseconds.
 * @vfid: virtual function id; bit 0 of vfid_event enables the event.
 *
 * Caller must hold cmd.event_sem (see udma_cmd_mbox_wait()), which bounds
 * concurrent users by max_cmds so the free-ring walk below always finds a
 * non-busy context.
 *
 * Return: 0 on success, -EBUSY on completion timeout, or a negative errno.
 */
static int __udma_cmd_mbox_wait(struct udma_dev *udma_dev,
				struct udma_cmq_desc *desc,
				uint32_t timeout, int vfid)
{
	struct udma_cmdq *cmd = &udma_dev->cmd;
	int vfid_event = (vfid << 1) | 0x1;	/* bit 0: event enable */
	struct device *dev = udma_dev->dev;
	struct udma_cmd_context *context;
	int ret, op;

	/* Pop entries off the free ring until a non-busy context is found. */
	spin_lock(&cmd->ctx_lock);
	do {
		context = &cmd->context[cmd->free_head];
		cmd->free_head = context->next;
	} while (context->busy);
	/* Advance the token so a late event for a prior use is rejected
	 * by the token check in udma_cmd_event().
	 */
	context->token += cmd->max_cmds;
	context->busy = 1;
	spin_unlock(&cmd->ctx_lock);

	reinit_completion(&context->done);

	/* Low byte of cmd_tag is the mailbox opcode (used for logging). */
	op = le32_to_cpu(((struct udma_mbox *)desc->data)->cmd_tag) & 0xff;
	ret = udma_cmd_mbox_post_hw(udma_dev, desc, context->token, vfid_event);
	if (ret) {
		dev_err_ratelimited(dev,
				    "failed to post mailbox %x in event mode, ret = %d.\n",
				    op, ret);
		goto out;
	}

	/* udma_cmd_event() fills ->result/->out_param and completes ->done. */
	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		dev_err_ratelimited(dev, "[cmd]token 0x%x of mailbox 0x%x timeout.\n",
				    context->token, op);
		ret = -EBUSY;
		goto out;
	}

	ret = context->result;
	if (ret)
		dev_err_ratelimited(dev, "[cmd]token 0x%x of mailbox 0x%x error %d\n",
				    context->token, op, ret);

out:
	context->busy = 0;
	return ret;
}

/* Event-mode entry: event_sem bounds concurrent commands to max_cmds. */
static int udma_cmd_mbox_wait(struct udma_dev *dev, struct udma_cmq_desc *desc,
			      uint32_t timeout, int vfid)
{
	int result;

	down(&dev->cmd.event_sem);
	result = __udma_cmd_mbox_wait(dev, desc, timeout, vfid);
	up(&dev->cmd.event_sem);

	return result;
}

/*
 * udma_cmd_mbox() - issue a mailbox command in the currently active mode.
 * @dev: udma device.
 * @desc: descriptor carrying the mailbox payload.
 * @timeout: completion wait limit, in milliseconds.
 * @vfid: virtual function id.
 *
 * Return: 0 on success or a negative errno.
 */
int udma_cmd_mbox(struct udma_dev *dev, struct udma_cmq_desc *desc,
		  uint32_t timeout, int vfid)
{
	if (!dev->cmd.use_events)
		return udma_cmd_mbox_poll(dev, desc, timeout, vfid);

	return udma_cmd_mbox_wait(dev, desc, timeout, vfid);
}
+45 −0
Original line number Diff line number Diff line
@@ -38,8 +38,53 @@ enum {
	CMD_RST_PRC_EBUSY,
};

/*
 * Mailbox command as laid out in a cmq descriptor's data area; all fields
 * are little-endian (filled by mbox_desc_init() and __udma_post_mbox()).
 */
struct udma_mbox {
	uint32_t	in_param_l;	/* input parameter, low 32 bits */
	uint32_t	in_param_h;	/* input parameter, high 32 bits */
	uint32_t	out_param_l;	/* output parameter, low 32 bits */
	uint32_t	out_param_h;	/* output parameter, high 32 bits */
	uint32_t	cmd_tag;	/* in_modifier << 8 | opcode */
	uint32_t	token_event_en;	/* vfid/event-en << 16 | token */
};

/* Response of UDMA_OPC_QUERY_MB_ST: bit 31 = HW busy, bits 7:0 = status. */
struct udma_mbox_status {
	uint32_t	mb_status_hw_run;	/* see MB_ST_HW_RUN_M / MB_ST_COMPLETE_M */
	uint32_t	rsv[5];			/* reserved */
};

/* Time (ms) to wait for the mailbox to become idle before posting. */
#define UDMA_GO_BIT_TIMEOUT_MSECS 10000

/* Fields of udma_mbox_status.mb_status_hw_run. */
#define MB_ST_HW_RUN_M BIT(31)		/* set while HW is running a mailbox */
#define MB_ST_COMPLETE_M GENMASK(7, 0)	/* completion status of last mailbox */

#define MB_ST_COMPLETE_SUCC 1		/* completion status: success */
#define UDMA_MB_EVENT_EN_SHIFT 16	/* vfid/event field in token_event_en */

void dump_desc(struct udma_dev *dev, struct udma_cmq_desc *desc);
struct udma_cmd_mailbox *udma_alloc_cmd_mailbox(struct udma_dev *dev);
void udma_free_cmd_mailbox(struct udma_dev *dev,
			   struct udma_cmd_mailbox *mailbox);
int udma_post_mbox(struct udma_dev *dev, struct udma_cmq_desc *desc,
		   uint16_t token, int vfid_event);
int udma_poll_mbox_done(struct udma_dev *dev, uint32_t timeout);
bool udma_chk_mbox_is_avail(struct udma_dev *dev, bool *busy);
void udma_cmq_setup_basic_desc(struct udma_cmq_desc *desc,
			       enum udma_opcode_type opcode,
			       bool is_read);
int udma_cmq_send(struct udma_dev *dev, struct udma_cmq_desc *desc, int num);
int udma_cmd_mbox(struct udma_dev *dev, struct udma_cmq_desc *desc,
		  uint32_t timeout, int vfid);
void udma_cmd_event(struct udma_dev *udma_dev, uint16_t token, uint8_t status,
		    uint64_t out_param);
/*
 * Fill the common fields of a mailbox command: the 64-bit in/out
 * parameters are split into little-endian 32-bit halves, and cmd_tag
 * packs in_modifier into bits 31:8 with the opcode in bits 7:0.
 */
static inline void mbox_desc_init(struct udma_mbox *mb, uint64_t in_param,
				  uint64_t out_param, uint32_t in_modifier,
				  uint16_t op)
{
	mb->in_param_l = cpu_to_le32(in_param);
	mb->in_param_h = cpu_to_le32(in_param >> 32);
	mb->out_param_l = cpu_to_le32(out_param);
	mb->out_param_h = cpu_to_le32(out_param >> 32);
	mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
}

#endif /* _UDMA_CMD_H */
+30 −0
Original line number Diff line number Diff line
@@ -25,6 +25,14 @@
#define UDMA_INVALID_ID				0xffff
#define UDMA_MAX_IRQ_NUM			128
#define UDMA_CAP_FLAGS_EX_SHIFT			12

#define UDMA_CMQ_TX_TIMEOUT			30000
#define UDMA_CMQ_DESC_NUM_S			3
#define UDMA_CMD_CSQ_DESC_NUM			1024

#define UDMA_TX_CMQ_BASEADDR_L_REG		0x07000
#define UDMA_TX_CMQ_BASEADDR_H_REG		0x07004
#define UDMA_TX_CMQ_DEPTH_REG			0x07008
#define UDMA_TX_CMQ_PI_REG			0x07010
#define UDMA_TX_CMQ_CI_REG			0x07014

@@ -42,9 +50,15 @@ enum udma_instance_state {
	UDMA_STATE_UNINIT,
};

/* Command queue ring type stored in the ring's flag field (see init_csq()). */
enum {
	TYPE_CSQ = 1	/* command send queue */
};

/* Per-token state for one in-flight event-mode mailbox command. */
struct udma_cmd_context {
	struct completion	done;		/* completed by udma_cmd_event() */
	int			result;		/* 0 on success, -EIO on failure */
	int			next;		/* next index in the free ring */
	uint64_t		out_param;	/* output reported by the event */
	uint16_t		token;		/* token expected in the completion */
	uint16_t		busy;		/* nonzero while command in flight */
};
@@ -89,6 +103,12 @@ struct udma_cmdq {
	struct rw_semaphore	udma_mb_rwsem;
	enum udma_cmdq_state	state;
};

/* Mailbox buffer allocated from cmd.pool (see udma_alloc_cmd_mailbox()). */
struct udma_cmd_mailbox {
	void		       *buf;	/* CPU address of the buffer */
	dma_addr_t		dma;	/* DMA address handed to hardware */
};

struct udma_netdev {
	spinlock_t		lock;
	struct net_device	*netdevs[UDMA_MAX_PORTS];
@@ -116,6 +136,10 @@ struct udma_hw {
	int (*hw_profile)(struct udma_dev *udma_dev);
	int (*hw_init)(struct udma_dev *udma_dev);
	void (*hw_exit)(struct udma_dev *udma_dev);
	int (*post_mbox)(struct udma_dev *udma_dev, struct udma_cmq_desc *desc,
			 uint16_t token, int event);
	int (*poll_mbox_done)(struct udma_dev *udma_dev,
			      uint32_t timeout);
};

struct udma_caps {
@@ -291,6 +315,12 @@ struct udma_dev {
	uint32_t			cong_algo_tmpl_id;
};

int udma_cmd_init(struct udma_dev *udma_dev);
void udma_cmd_cleanup(struct udma_dev *udma_dev);
int udma_cmd_use_events(struct udma_dev *udma_dev);
void udma_cmd_use_polling(struct udma_dev *udma_dev);
int udma_cmq_send(struct udma_dev *udma_dev,
		  struct udma_cmq_desc *desc, int num);
int udma_hnae_client_init(struct udma_dev *udma_dev);
void udma_hnae_client_exit(struct udma_dev *udma_dev);

+78 −1
Original line number Diff line number Diff line
@@ -393,14 +393,89 @@ static int udma_profile(struct udma_dev *udma_dev)
	return udma_pf_profile(udma_dev);
}

static int udma_cmq_init(struct udma_dev *udma_dev)
static int udma_alloc_cmq_desc(struct udma_dev *udma_dev,
			       struct udma_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct udma_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(udma_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(udma_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;

		dev_err_ratelimited(udma_dev->dev,
				    "failed to map cmq desc addr.\n");
		return -ENOMEM;
	}

	return 0;
}

static void udma_free_cmq_desc(struct udma_dev *udma_dev,
			       struct udma_cmq_ring *ring)
{
	dma_unmap_single(udma_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct udma_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/*
 * init_csq() - initialize the command send queue and program its base
 * address, depth and ring pointers into hardware.
 * @udma_dev: udma device.
 * @csq: the CSQ ring to set up.
 *
 * Return: 0 on success or a negative errno from descriptor allocation.
 */
static int init_csq(struct udma_dev *udma_dev,
		    struct udma_cmq_ring *csq)
{
	dma_addr_t dma;
	int ret;

	csq->desc_num = UDMA_CMD_CSQ_DESC_NUM;
	mutex_init(&csq->lock);
	csq->flag = TYPE_CSQ;
	csq->head = 0;

	ret = udma_alloc_cmq_desc(udma_dev, csq);
	if (ret)
		return ret;

	/* Tell hardware where the ring lives and how deep it is. */
	dma = csq->desc_dma_addr;
	ub_write(udma_dev, UDMA_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
	ub_write(udma_dev, UDMA_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
	ub_write(udma_dev, UDMA_TX_CMQ_DEPTH_REG,
		 (uint32_t)csq->desc_num >> UDMA_CMQ_DESC_NUM_S);

	/* Make sure to write CI first and then PI */
	ub_write(udma_dev, UDMA_TX_CMQ_CI_REG, 0);
	ub_write(udma_dev, UDMA_TX_CMQ_PI_REG, 0);

	return 0;
}

/* Initialize the command queue: set the TX timeout and bring up the CSQ. */
static int udma_cmq_init(struct udma_dev *udma_dev)
{
	struct udma_priv *priv = (struct udma_priv *)udma_dev->priv;
	int ret;

	/* Default transmit timeout for command queue operations. */
	priv->cmq.tx_timeout = UDMA_CMQ_TX_TIMEOUT;

	ret = init_csq(udma_dev, &priv->cmq.csq);
	if (ret)
		dev_err(udma_dev->dev, "failed to init CSQ, ret = %d.\n", ret);

	return ret;
}

/* Tear down the command queue: unmap and free the CSQ descriptor ring. */
static void udma_cmq_exit(struct udma_dev *udma_dev)
{
	struct udma_priv *priv = (struct udma_priv *)udma_dev->priv;
	struct udma_cmq_ring *csq = &priv->cmq.csq;

	udma_free_cmq_desc(udma_dev, csq);
}

static int udma_hw_init(struct udma_dev *udma_dev)
@@ -419,6 +494,8 @@ static const struct udma_hw udma_hw = {
	.hw_profile = udma_profile,
	.hw_init = udma_hw_init,
	.hw_exit = udma_hw_exit,
	.post_mbox = udma_post_mbox,
	.poll_mbox_done = udma_poll_mbox_done,
};

static void udma_get_cfg(struct udma_dev *udma_dev,
+16 −0
Original line number Diff line number Diff line
@@ -74,6 +74,21 @@ int udma_hnae_client_init(struct udma_dev *udma_dev)
		goto error_failed_hw_profile;
	}

	ret = udma_cmd_init(udma_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	if (udma_dev->cmd_mod) {
		ret = udma_cmd_use_events(udma_dev);
		if (ret) {
			udma_dev->cmd_mod = 0;
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
		}
	}

	ret = udma_dev->hw->hw_init(udma_dev);
	if (ret) {
		dev_err(dev, "hw_init failed!\n");
@@ -92,6 +107,7 @@ int udma_hnae_client_init(struct udma_dev *udma_dev)
	udma_dev->hw->hw_exit(udma_dev);

error_failed_engine_init:
error_failed_cmd_init:
error_failed_hw_profile:
	udma_dev->hw->cmq_exit(udma_dev);