Commit 075cafff authored by Paolo Abeni's avatar Paolo Abeni
Browse files

Merge branch 'macsec-fixes-for-cn10kb'

Geetha sowjanya says:

====================
Macsec fixes for CN10KB

This patch set has fixes for the issues encountered while
testing macsec on CN10KB silicon. Below is the description
of patches:

Patch 1: For each LMAC, two MCSX_MCS_TOP_SLAVE_CHANNEL_CFG registers exist
	 in CN10KB. Bypass has to be disabled in both registers.

Patch 2: Add workaround for errata w.r.t accessing TCAM DATA and MASK registers.

Patch 3: Fixes the parser configuration to allow PTP traffic.

Patch 4: Addresses the IP vector and block level interrupt mask changes.

Patch 5: Fix NULL pointer crashes when rebooting

Patch 6: Since MCS is a global block shared by all LMACs, the TCAM match
	 must also include the macsec DMAC to distinguish each macsec interface.

Patch 7: Before freeing MCS hardware resources back to the AF, also clear the stats.

Patch 8: Stats which share a single counter in hardware are tracked in software.
	 This tracking was based on the wrong secy mode params.
	 Use the correct secy mode params.

Patch 9: When updating secy mode params, the PN number was also being reset
	 to its initial value. Hence, do not write to the PN value register
	 when updating the secy.
====================

Link: https://lore.kernel.org/r/20230426062528.20575-1-gakula@marvell.com


Signed-off-by: default avatarPaolo Abeni <pabeni@redhat.com>
parents c23ae509 3c99bace
Loading
Loading
Loading
Loading
+54 −56
Original line number Diff line number Diff line
@@ -473,6 +473,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, data[reg_id]);
		}
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, mask[reg_id]);
		}
@@ -480,6 +482,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, data[reg_id]);
		}
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, mask[reg_id]);
		}
@@ -494,6 +498,9 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)

	/* Flow entry */
	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	__set_bit(flow_id, mcs->rx.flow_ids.bmap);
	__set_bit(flow_id, mcs->tx.flow_ids.bmap);

	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
@@ -504,6 +511,8 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
	}
	/* secy */
	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
	__set_bit(secy_id, mcs->rx.secy.bmap);
	__set_bit(secy_id, mcs->tx.secy.bmap);

	/* Set validate frames to NULL and enable control port */
	plcy = 0x7ull;
@@ -528,6 +537,7 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
	/* Enable Flowid entry */
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);

	return 0;
}

@@ -926,60 +936,42 @@ static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
	mcs_add_intr_wq_entry(mcs, &event);
}

static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
				 enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;
	u64 val, reg;
	int lmac;

	if (!(intr & MCS_BBE_INT_MASK))
	if (!(intr & 0x6ULL))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		/* Lower nibble denotes data fifo overflow interrupts and
		 * upper nibble indicates policy fifo overflow interrupts.
		 */
		if (intr & 0xFULL)
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
	if (intr & BIT_ULL(1))
		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
					MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
	else
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
					MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
	val = mcs_reg_read(mcs, reg);

		/* Notify the lmac_id info which ran into BBE fatal error */
		event.lmac_id = i & 0x3ULL;
		mcs_add_intr_wq_entry(mcs, &event);
	/* policy/data over flow occurred */
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
		if (!(val & BIT_ULL(lmac)))
			continue;
		dev_warn(mcs->dev, "BEE:Policy or data overflow occurred on lmac:%d\n", lmac);
	}
}

static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
				 enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;
	int lmac;

	if (!(intr & MCS_PAB_INT_MASK))
	if (!(intr & 0xFFFFFULL))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
				  MCS_PAB_TX_CHAN_OVERFLOW_INT;

		/* Notify the lmac_id info which ran into PAB fatal error */
		event.lmac_id = i;
		mcs_add_intr_wq_entry(mcs, &event);
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
		if (intr & BIT_ULL(lmac))
			dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
	}
}

@@ -988,9 +980,8 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
	struct mcs *mcs = (struct mcs *)mcs_irq;
	u64 intr, cpm_intr, bbe_intr, pab_intr;

	/* Disable and clear the interrupt */
	/* Disable  the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));

	/* Check which block has interrupt*/
	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
@@ -1037,7 +1028,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
	/* BBE RX */
	if (intr & MCS_BBE_RX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
@@ -1047,7 +1038,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
	/* BBE TX */
	if (intr & MCS_BBE_TX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
@@ -1057,7 +1048,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
	/* PAB RX */
	if (intr & MCS_PAB_RX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
		mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
@@ -1067,14 +1058,15 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
	/* PAB TX */
	if (intr & MCS_PAB_TX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
		mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
	}

	/* Enable the interrupt */
	/* Clear and enable the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

	return IRQ_HANDLED;
@@ -1156,7 +1148,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
		return ret;
	}

	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
	ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
	if (ret) {
		dev_err(mcs->dev, "MCS IP irq registration failed\n");
@@ -1175,11 +1167,11 @@ static int mcs_register_interrupts(struct mcs *mcs)
	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);

	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);

	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);

	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
	if (!mcs->tx_sa_active) {
@@ -1190,7 +1182,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
	return ret;

free_irq:
	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
	free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
exit:
	pci_free_irq_vectors(mcs->pdev);
	mcs->num_vec = 0;
@@ -1325,8 +1317,11 @@ void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
{
	u64 reg;
	int id = lmac_id * 2;

	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
	mcs_reg_write(mcs, reg, (u64)mode);
	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
	mcs_reg_write(mcs, reg, (u64)mode);
}

@@ -1484,6 +1479,7 @@ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
	hw->mcs_x2p_intf = 5;		/* x2p clabration intf */
	hw->mcs_blks = 1;		/* MCS blocks */
	hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
}

static struct mcs_ops cn10kb_mcs_ops = {
@@ -1492,6 +1488,8 @@ static struct mcs_ops cn10kb_mcs_ops = {
	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
	.mcs_bbe_intr_handler		= cn10kb_mcs_bbe_intr_handler,
	.mcs_pab_intr_handler		= cn10kb_mcs_pab_intr_handler,
};

static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1592,7 +1590,7 @@ static void mcs_remove(struct pci_dev *pdev)

	/* Set MCS to external bypass */
	mcs_set_external_bypass(mcs, true);
	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
	free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
+12 −14
Original line number Diff line number Diff line
@@ -43,23 +43,14 @@
/* Reserved resources for default bypass entry */
#define MCS_RSRC_RSVD_CNT		1

/* MCS Interrupt Vector Enumeration */
enum mcs_int_vec_e {
	MCS_INT_VEC_MIL_RX_GBL		= 0x0,
	MCS_INT_VEC_MIL_RX_LMACX	= 0x1,
	MCS_INT_VEC_MIL_TX_LMACX	= 0x5,
	MCS_INT_VEC_HIL_RX_GBL		= 0x9,
	MCS_INT_VEC_HIL_RX_LMACX	= 0xa,
	MCS_INT_VEC_HIL_TX_GBL		= 0xe,
	MCS_INT_VEC_HIL_TX_LMACX	= 0xf,
	MCS_INT_VEC_IP			= 0x13,
	MCS_INT_VEC_CNT			= 0x14,
};
/* MCS Interrupt Vector */
#define MCS_CNF10KB_INT_VEC_IP	0x13
#define MCS_CN10KB_INT_VEC_IP	0x53

#define MCS_MAX_BBE_INT			8ULL
#define MCS_BBE_INT_MASK		0xFFULL

#define MCS_MAX_PAB_INT			4ULL
#define MCS_MAX_PAB_INT		8ULL
#define MCS_PAB_INT_MASK	0xFULL

#define MCS_BBE_RX_INT_ENA		BIT_ULL(0)
@@ -137,6 +128,7 @@ struct hwinfo {
	u8 lmac_cnt;
	u8 mcs_blks;
	unsigned long	lmac_bmap; /* bitmap of enabled mcs lmac */
	u16 ip_vec;
};

struct mcs {
@@ -165,6 +157,8 @@ struct mcs_ops {
	void	(*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
	void	(*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
	void	(*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
	void	(*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
	void	(*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
};

extern struct pci_driver mcs_driver;
@@ -219,6 +213,8 @@ void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *ma
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cn10kb_mcs_parser_cfg(struct mcs *mcs);
void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);

/* CNF10K-B APIs */
struct mcs_ops *cnf10kb_get_mac_ops(void);
@@ -229,6 +225,8 @@ void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *m
void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);

/* Stats APIs */
void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+63 −0
Original line number Diff line number Diff line
@@ -13,6 +13,8 @@ static struct mcs_ops cnf10kb_mcs_ops = {
	.mcs_tx_sa_mem_map_write	= cnf10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write	= cnf10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map		= cnf10kb_mcs_flowid_secy_map,
	.mcs_bbe_intr_handler		= cnf10kb_mcs_bbe_intr_handler,
	.mcs_pab_intr_handler		= cnf10kb_mcs_pab_intr_handler,
};

struct mcs_ops *cnf10kb_get_mac_ops(void)
@@ -31,6 +33,7 @@ void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
	hw->lmac_cnt = 4;		/* lmacs/ports per mcs block */
	hw->mcs_x2p_intf = 1;		/* x2p clabration intf */
	hw->mcs_blks = 7;		/* MCS blocks */
	hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
}

void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
@@ -212,3 +215,63 @@ void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

/* CNF10K-B BBE interrupt handler: for each pending BBE interrupt bit,
 * classify it as a data-FIFO or policy-FIFO overflow and queue an event
 * (via mcs_add_intr_wq_entry) so the owning PF gets notified.
 *
 * @mcs:  MCS block state
 * @intr: raw BBE interrupt status word read by the caller
 * @dir:  MCS_RX or MCS_TX; selects the RX/TX variant of the event mask
 */
void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
				  enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	/* Bail out early if no BBE interrupt bit is pending */
	if (!(intr & MCS_BBE_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		/* Lower nibble denotes data fifo overflow interrupts and
		 * upper nibble indicates policy fifo overflow interrupts.
		 */
		/* NOTE(review): this tests the whole intr word against the
		 * data-fifo nibble rather than BIT_ULL(i); if any data-fifo
		 * bit is set, policy-fifo bits in the same word are also
		 * reported as data overflow — confirm this is intended.
		 */
		if (intr & 0xFULL)
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
		else
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;

		/* Notify the lmac_id info which ran into BBE fatal error */
		event.lmac_id = i & 0x3ULL;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

/* CNF10K-B PAB interrupt handler: for each pending PAB interrupt bit,
 * queue a channel-overflow event (via mcs_add_intr_wq_entry) carrying the
 * affected lmac_id so the owning PF gets notified.
 *
 * @mcs:  MCS block state
 * @intr: raw PAB interrupt status word read by the caller
 * @dir:  MCS_RX or MCS_TX; selects the RX/TX variant of the event mask
 */
void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
				  enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	/* Bail out early if no PAB interrupt bit is pending */
	if (!(intr & MCS_PAB_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		event.intr_mask = (dir == MCS_RX) ?
				  MCS_PAB_RX_CHAN_OVERFLOW_INT :
				  MCS_PAB_TX_CHAN_OVERFLOW_INT;

		/* Notify the lmac_id info which ran into PAB fatal error */
		event.lmac_id = i;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}
+5 −1
Original line number Diff line number Diff line
@@ -97,6 +97,7 @@
#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a)          (0x46f8ull + (a) * 0x8ull)
#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a)	(0x788ull + (a) * 0x8ull)
#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a)		(0x4738ull + (a) * 0x8ull)
#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a)		(0x3b98ull + (a) * 0x8ull)
#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({	\
	u64 offset;					\
							\
@@ -275,7 +276,10 @@
#define MCSX_BBE_RX_SLAVE_CAL_ENTRY			0x180ull
#define MCSX_BBE_RX_SLAVE_CAL_LEN			0x188ull
#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a)		(0x290ull + (a) * 0x40ull)

#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0		0xe20
#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0		0x1298
#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0		0xe40
#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0		0x12b8
#define MCSX_BBE_RX_SLAVE_BBE_INT ({	\
	u64 offset;			\
					\
+37 −0
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@

#include "mcs.h"
#include "rvu.h"
#include "mcs_reg.h"
#include "lmac_common.h"

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
@@ -32,6 +33,42 @@ static struct _req_type __maybe_unused \
MBOX_UP_MCS_MESSAGES
#undef M

/* Configure the MCS PEX parser for PTP traffic on one RPM/LMAC.
 *
 * When PTP is enabled, RPM prepends an 8-byte header to every RX packet;
 * the MCS PEX block must be told to skip those 8 bytes during parsing.
 * The register layout differs per silicon:
 *  - CNF10K-B (multiple MCS blocks, one per RPM): a per-block PEX
 *    configuration register with one bit per LMAC.
 *  - CN10KB (single global MCS block): a per-port PORT_CFG register,
 *    indexed by the flattened (rpm_id, lmac_id) port number.
 *
 * @rvu:     RVU admin-function state
 * @rpm_id:  RPM (MAC) block index
 * @lmac_id: LMAC index within the RPM
 * @ena:     true to enable the 8B skip, false to disable it
 */
void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
{
	struct mcs *mcs;
	u64 cfg;
	u8 port;

	/* No MCS blocks on this system — nothing to configure */
	if (!rvu->mcs_blk_cnt)
		return;

	/* When ptp is enabled, RPM appends 8B header for all
	 * RX packets. MCS PEX need to configure to skip 8B
	 * during packet parsing.
	 */

	/* CNF10K-B */
	if (rvu->mcs_blk_cnt > 1) {
		/* One MCS block per RPM: toggle this LMAC's bit in the
		 * block-wide PEX configuration register.
		 */
		mcs = mcs_get_pdata(rpm_id);
		cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
		if (ena)
			cfg |= BIT_ULL(lmac_id);
		else
			cfg &= ~BIT_ULL(lmac_id);
		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
		return;
	}
	/* CN10KB */
	mcs = mcs_get_pdata(0);
	/* Single global MCS block: flatten (rpm, lmac) into a port index */
	port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
	cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
	if (ena)
		cfg |= BIT_ULL(0);
	else
		cfg &= ~BIT_ULL(0);
	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
}

int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
				       struct mcs_set_lmac_mode *req,
				       struct msg_rsp *rsp)
Loading