Commit 7875c018 authored by Paolo Abeni's avatar Paolo Abeni
Browse files

Merge branch 'net-ethernet-mtk_wed-introduce-reset-support'

Lorenzo Bianconi says:

====================
net: ethernet: mtk_wed: introduce reset support

Introduce proper reset integration between the ethernet and wlan drivers so
that a wlan driver reset is scheduled whenever the ethernet/wed driver is
resetting. Introduce the mtk_hw_reset_monitor work to detect possible DMA hangs.
====================

Link: https://lore.kernel.org/r/cover.1673715298.git.lorenzo@kernel.org


Signed-off-by: default avatarPaolo Abeni <pabeni@redhat.com>
parents 05cb8b39 08a764a7
Loading
Loading
Loading
Loading
+258 −39
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ static const struct mtk_reg_map mtk_reg_map = {
		.delay_irq	= 0x0a0c,
		.irq_status	= 0x0a20,
		.irq_mask	= 0x0a28,
		.adma_rx_dbg0	= 0x0a38,
		.int_grp	= 0x0a50,
	},
	.qdma = {
@@ -82,6 +83,8 @@ static const struct mtk_reg_map mtk_reg_map = {
		[0]		= 0x2800,
		[1]		= 0x2c00,
	},
	.pse_iq_sta		= 0x0110,
	.pse_oq_sta		= 0x0118,
};

static const struct mtk_reg_map mt7628_reg_map = {
@@ -112,6 +115,7 @@ static const struct mtk_reg_map mt7986_reg_map = {
		.delay_irq	= 0x620c,
		.irq_status	= 0x6220,
		.irq_mask	= 0x6228,
		.adma_rx_dbg0	= 0x6238,
		.int_grp	= 0x6250,
	},
	.qdma = {
@@ -143,6 +147,8 @@ static const struct mtk_reg_map mt7986_reg_map = {
		[0]		= 0x4800,
		[1]		= 0x4c00,
	},
	.pse_iq_sta		= 0x0180,
	.pse_oq_sta		= 0x01a0,
};

/* strings used by ethtool */
@@ -3030,14 +3036,29 @@ static void mtk_dma_free(struct mtk_eth *eth)
	kfree(eth->scratch_head);
}

/* Return true when the frame engine reports a fatal interrupt condition
 * (FQ empty, RX FIFO under/overflow, or a TSO failure) in MTK_INT_STATUS2,
 * i.e. when a HW reset is warranted.
 */
static bool mtk_hw_reset_check(struct mtk_eth *eth)
{
	const u32 fault_mask = MTK_FE_INT_FQ_EMPTY | MTK_FE_INT_RFIFO_UF |
			       MTK_FE_INT_RFIFO_OV | MTK_FE_INT_TSO_FAIL |
			       MTK_FE_INT_TSO_ALIGN | MTK_FE_INT_TSO_ILLEGAL;

	return !!(mtk_r32(eth, MTK_INT_STATUS2) & fault_mask);
}

static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (test_bit(MTK_RESETTING, &eth->state))
		return;

	if (!mtk_hw_reset_check(eth))
		return;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	netif_err(eth, tx_err, dev, "transmit timed out\n");

	schedule_work(&eth->pending_work);
}

@@ -3517,22 +3538,188 @@ static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}

static int mtk_hw_init(struct mtk_eth *eth)
/* Perform a full ("cold") reset of the ethernet block: assert the ETH, FE
 * and per-SoC PPE reset lines through ETHSYS_RSTCTRL via ethsys_reset().
 * On NETSYS v2 the FE reset-check idle bits are cleared for the duration
 * of the reset and restored afterwards.
 */
static void mtk_hw_reset(struct mtk_eth *eth)
{
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		/* v2 SoCs: mask the idle-check bits while resetting and use
		 * the v2 PPE0 reset bit.
		 */
		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
		val = RSTCTRL_PPE0_V2;
	} else {
		val = RSTCTRL_PPE0;
	}

	/* SoCs with a second PPE need its reset line asserted as well. */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
		val |= RSTCTRL_PPE1;

	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);

	/* Restore the idle-check enable bits on v2 once the reset is done. */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
			     0x3ffffff);
}

/* Read back ETHSYS_RSTCTRL; helper shaped for readx_poll_timeout_atomic(). */
static u32 mtk_hw_reset_read(struct mtk_eth *eth)
{
	u32 rstctrl;

	regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &rstctrl);

	return rstctrl;
}

/* Perform a staged "warm" reset of the frame engine: first latch the FE
 * reset bit, then assert the ETH/PPE reset lines (stage 0), then release
 * everything including FE (stage 1). Falls back to a full mtk_hw_reset()
 * if the FE bit never latches.
 */
static void mtk_hw_warm_reset(struct mtk_eth *eth)
{
	u32 rst_mask, val;

	/* Assert the FE reset and poll until the bit reads back set. */
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
			   RSTCTRL_FE);
	if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
				      val & RSTCTRL_FE, 1, 1000)) {
		dev_err(eth->dev, "warm reset failed\n");
		mtk_hw_reset(eth);
		return;
	}

	/* Select the PPE reset bits matching this SoC generation. */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
	else
		rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
		rst_mask |= RSTCTRL_PPE1;

	/* Stage 0: assert the ETH/PPE resets while FE is still held. */
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);

	udelay(1);
	val = mtk_hw_reset_read(eth);
	if (!(val & rst_mask))
		dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
			val, rst_mask);

	/* Stage 1: release every reset line, FE included. */
	rst_mask |= RSTCTRL_FE;
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);

	udelay(1);
	val = mtk_hw_reset_read(eth);
	if (val & rst_mask)
		dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
			val, rst_mask);
}

/* Heuristic DMA-hang detector run periodically from the reset monitor work.
 * Samples WDMA, QDMA and ADMA state registers; a hang is reported (return
 * true) only after the same stuck condition is observed on more than two
 * consecutive invocations, tracked via the per-engine counters in
 * eth->reset. Returns false on MT7628, which has none of these engines'
 * debug registers.
 */
static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
	bool oq_hang, cdm1_busy, adma_busy;
	bool wtx_busy, cdm_full, oq_free;
	u32 wdidx, val, gdm1_fc, gdm2_fc;
	bool qfsm_hang, qfwd_hang;
	bool ret = false;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return false;

	/* WDMA sanity checks: TX DMA busy with a full CDM TX FIFO, free PSE
	 * output queues, and a DMA index unchanged since the previous sample
	 * (eth->reset.wdidx) indicates a stalled WDMA.
	 */
	wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);

	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
	wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);

	val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
	cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);

	oq_free  = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
		    !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));

	if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
		/* Only trigger after >2 consecutive stuck samples. */
		if (++eth->reset.wdma_hang_count > 2) {
			eth->reset.wdma_hang_count = 0;
			ret = true;
		}
		goto out;
	}

	/* QDMA sanity checks: FSM stuck plus no forward progress, while a
	 * GDM still has TX pending, the GMAC FSM is not idle and no flow
	 * control frames were counted.
	 */
	qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
	qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);

	gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
	gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
	gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
	gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
	gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
	gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);

	if (qfsm_hang && qfwd_hang &&
	    ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
	     (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
		/* Only trigger after >2 consecutive stuck samples. */
		if (++eth->reset.qdma_hang_count > 2) {
			eth->reset.qdma_hang_count = 0;
			ret = true;
		}
		goto out;
	}

	/* ADMA sanity checks: PSE output queue backed up, CDM1 FSM busy and
	 * the ADMA RX debug register reporting no activity.
	 */
	oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
	cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
	adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
		    !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));

	if (oq_hang && cdm1_busy && adma_busy) {
		/* Only trigger after >2 consecutive stuck samples. */
		if (++eth->reset.adma_hang_count > 2) {
			eth->reset.adma_hang_count = 0;
			ret = true;
		}
		goto out;
	}

	/* No engine looked stuck this time: restart all counters. */
	eth->reset.wdma_hang_count = 0;
	eth->reset.qdma_hang_count = 0;
	eth->reset.adma_hang_count = 0;
out:
	/* Remember the WDMA index for the next sample's comparison. */
	eth->reset.wdidx = wdidx;

	return ret;
}

/* Periodic monitor: check for stuck DMA engines and, if a hang is
 * detected, kick the pending reset work. Always re-arms itself with a
 * MTK_DMA_MONITOR_TIMEOUT delay.
 */
static void mtk_hw_reset_monitor_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(to_delayed_work(work),
					   struct mtk_eth,
					   reset.monitor_work);

	/* Skip the DMA checks while a reset is already in flight. */
	if (!test_bit(MTK_RESETTING, &eth->state) &&
	    mtk_hw_check_dma_hang(eth))
		schedule_work(&eth->pending_work);

	schedule_delayed_work(&eth->reset.monitor_work,
			      MTK_DMA_MONITOR_TIMEOUT);
}

static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
		       ETHSYS_DMA_AG_MAP_PPE;
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	int i, val, ret;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
	if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	if (!reset) {
		pm_runtime_enable(eth->dev);
		pm_runtime_get_sync(eth->dev);

		ret = mtk_clk_enable(eth);
		if (ret)
			goto err_disable_pm;
	}

	if (eth->ethsys)
		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
@@ -3556,22 +3743,14 @@ static int mtk_hw_init(struct mtk_eth *eth)
		return 0;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
		val = RSTCTRL_PPE0_V2;
	} else {
		val = RSTCTRL_PPE0;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
		val |= RSTCTRL_PPE1;
	msleep(100);

	ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
	if (reset)
		mtk_hw_warm_reset(eth);
	else
		mtk_hw_reset(eth);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
			     0x3ffffff);

		/* Set FE to PDMAv2 if necessary */
		val = mtk_r32(eth, MTK_FE_GLO_MISC);
		mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
@@ -3673,8 +3852,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
	return 0;

err_disable_pm:
	if (!reset) {
		pm_runtime_put_sync(eth->dev);
		pm_runtime_disable(eth->dev);
	}

	return ret;
}
@@ -3753,52 +3934,86 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	return -EOPNOTSUPP;
}

/* Quiesce the hardware ahead of a frame-engine reset: take the FE ports
 * down, flush the PPE keep-alive state, mask NETSYS interrupts and force
 * the GMAC links down.
 */
static void mtk_prepare_for_reset(struct mtk_eth *eth)
{
	u32 val;
	int i;

	/* disable FE P3 and P4 (P4 only on SoCs that have a second PPE) */
	val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
		val |= MTK_FE_LINK_DOWN_P4;
	mtk_w32(eth, val, MTK_FE_GLO_CFG);

	/* adjust PPE configurations to prepare for reset */
	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
		mtk_ppe_prepare_reset(eth->ppe[i]);

	/* disable NETSYS interrupts */
	mtk_w32(eth, 0, MTK_FE_INT_ENABLE);

	/* force link down GMAC */
	for (i = 0; i < 2; i++) {
		val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
		mtk_w32(eth, val, MTK_MAC_MCR(i));
	}
}

static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;
	u32 val;
	int i;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
	set_bit(MTK_RESETTING, &eth->state);

	mtk_prepare_for_reset(eth);
	mtk_wed_fe_reset();
	/* Run again reset preliminary configuration in order to avoid any
	 * possible race during FE reset since it can run releasing RTNL lock.
	 */
	mtk_prepare_for_reset(eth);

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
		if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
			continue;

		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);
	usleep_range(15000, 16000);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);
	mtk_hw_init(eth, true);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {

		if (mtk_open(eth->netdev[i])) {
			netif_alert(eth, ifup, eth->netdev[i],
			      "Driver up/down cycle failed, closing device.\n");
				    "Driver up/down cycle failed\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
	/* enable FE P3 and P4 */
	val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
		val &= ~MTK_FE_LINK_DOWN_P4;
	mtk_w32(eth, val, MTK_FE_GLO_CFG);

	clear_bit(MTK_RESETTING, &eth->state);

	mtk_wed_fe_reset_complete();

	rtnl_unlock();
}

@@ -3843,6 +4058,7 @@ static int mtk_cleanup(struct mtk_eth *eth)
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);
	cancel_delayed_work_sync(&eth->reset.monitor_work);

	return 0;
}
@@ -4297,6 +4513,7 @@ static int mtk_probe(struct platform_device *pdev)

	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
	INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);

	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
@@ -4410,7 +4627,7 @@ static int mtk_probe(struct platform_device *pdev)
	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	err = mtk_hw_init(eth, false);
	if (err)
		goto err_wed_exit;

@@ -4499,6 +4716,8 @@ static int mtk_probe(struct platform_device *pdev)
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);

	platform_set_drvdata(pdev, eth);
	schedule_delayed_work(&eth->reset.monitor_work,
			      MTK_DMA_MONITOR_TIMEOUT);

	return 0;

+38 −0
Original line number Diff line number Diff line
@@ -77,12 +77,24 @@
#define	MTK_HW_LRO_REPLACE_DELTA	1000
#define	MTK_HW_LRO_SDL_REMAIN_ROOM	1522

/* Frame Engine Global Configuration */
#define MTK_FE_GLO_CFG		0x00
#define MTK_FE_LINK_DOWN_P3	BIT(11)
#define MTK_FE_LINK_DOWN_P4	BIT(12)

/* Frame Engine Global Reset Register */
#define MTK_RST_GL		0x04
#define RST_GL_PSE		BIT(0)

/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2		0x08
#define MTK_FE_INT_ENABLE	0x0c
#define MTK_FE_INT_FQ_EMPTY	BIT(8)
#define MTK_FE_INT_TSO_FAIL	BIT(12)
#define MTK_FE_INT_TSO_ILLEGAL	BIT(13)
#define MTK_FE_INT_TSO_ALIGN	BIT(14)
#define MTK_FE_INT_RFIFO_OV	BIT(18)
#define MTK_FE_INT_RFIFO_UF	BIT(19)
#define MTK_GDM1_AF		BIT(28)
#define MTK_GDM2_AF		BIT(29)

@@ -272,6 +284,8 @@

#define MTK_RX_DONE_INT_V2	BIT(14)

#define MTK_CDM_TXFIFO_RDY	BIT(7)

/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT	BIT(0)

@@ -562,6 +576,17 @@
#define MT7628_SDM_RBCNT	(MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR	(MT7628_SDM_OFFSET + 0x110)

#define MTK_FE_CDM1_FSM		0x220
#define MTK_FE_CDM2_FSM		0x224
#define MTK_FE_CDM3_FSM		0x238
#define MTK_FE_CDM4_FSM		0x298
#define MTK_FE_CDM5_FSM		0x318
#define MTK_FE_CDM6_FSM		0x328
#define MTK_FE_GDM1_FSM		0x228
#define MTK_FE_GDM2_FSM		0x22C

#define MTK_MAC_FSM(x)		(0x1010C + ((x) * 0x100))

struct mtk_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
@@ -958,6 +983,7 @@ struct mtk_reg_map {
		u32	delay_irq;	/* delay interrupt */
		u32	irq_status;	/* interrupt status */
		u32	irq_mask;	/* interrupt mask */
		u32	adma_rx_dbg0;
		u32	int_grp;
	} pdma;
	struct {
@@ -986,6 +1012,8 @@ struct mtk_reg_map {
	u32	gdma_to_ppe;
	u32	ppe_base;
	u32	wdma_base[2];
	u32	pse_iq_sta;
	u32	pse_oq_sta;
};

/* struct mtk_eth_data -	This is the structure holding all differences
@@ -1028,6 +1056,8 @@ struct mtk_soc_data {
	} txrx;
};

#define MTK_DMA_MONITOR_TIMEOUT		msecs_to_jiffies(1000)

/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS			2

@@ -1152,6 +1182,14 @@ struct mtk_eth {
	struct rhashtable		flow_table;

	struct bpf_prog			__rcu *prog;

	struct {
		struct delayed_work monitor_work;
		u32 wdidx;
		u8 wdma_hang_count;
		u8 qdma_hang_count;
		u8 adma_hang_count;
	} reset;
};

/* struct mtk_mac -	the structure that holds the info about the MACs of the
+27 −0
Original line number Diff line number Diff line
@@ -730,6 +730,33 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

/* Quiesce a PPE instance ahead of a frame-engine reset: cycle the
 * keep-alive machinery off and back on with maximum timers, then disable
 * scan mode and wait for the table to go idle.
 *
 * Returns 0 on success, -EINVAL for a NULL ppe, or the error from
 * mtk_ppe_wait_busy() if the PPE does not settle. The usleep_range()
 * pauses give the hardware time to absorb each configuration step.
 */
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
	if (!ppe)
		return -EINVAL;

	/* disable KA */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
	usleep_range(10000, 11000);

	/* set KA timer to maximum */
	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);

	/* set KA tick select */
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	usleep_range(10000, 11000);

	/* disable scan mode */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
	usleep_range(10000, 11000);

	return mtk_ppe_wait_busy(ppe);
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
+1 −0
Original line number Diff line number Diff line
@@ -309,6 +309,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);

void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);

+6 −0
Original line number Diff line number Diff line
@@ -58,6 +58,12 @@
#define MTK_PPE_TB_CFG_SCAN_MODE		GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG		GENMASK(19, 18)
#define MTK_PPE_TB_CFG_INFO_SEL			BIT(20)
#define MTK_PPE_TB_TICK_SEL			BIT(24)

#define MTK_PPE_BIND_LMT1			0x230
#define MTK_PPE_NTU_KEEPALIVE			GENMASK(23, 16)

#define MTK_PPE_KEEPALIVE			0x234

enum {
	MTK_PPE_SCAN_MODE_DISABLED,
Loading