Commit d54e1348 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'stmmac-multivector-msi'



Voon Weifeng says:

====================
net: stmmac: enable multi-vector MSI

This patchset adds support for multi MSI interrupts in addition to
current single common interrupt implementation. Each MSI interrupt is tied
to a newly introduced interrupt service routine (ISR). Hence, each interrupt
will only go through the corresponding ISR.

In order to increase the efficiency, enabling multi MSI interrupt will
automatically select the interrupt mode configuration INTM=1. When INTM=1,
the TX/RX transfer complete signal will only be asserted on the corresponding
sbd_perch_tx_intr_o[] or sbd_perch_rx_intr_o[] without asserting signal
on the common sbd_intr_o. Hence, for each TX/RX interrupts, only the
corresponding ISR will be triggered.

Every vendor might have different MSI vector assignment. So, this patchset
only includes multi-vector MSI assignment for Intel platform.

Changes:
v1 -> v2
 patch 2/5
 -Remove defensive check for invalid dev pointer
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 6c996e19 6ccf12ae
Loading
Loading
Loading
Loading
+21 −0
Original line number Diff line number Diff line
@@ -259,6 +259,9 @@ struct stmmac_safety_stats {
#define DMA_HW_FEAT_ACTPHYIF	0x70000000	/* Active/selected PHY iface */
#define DEFAULT_DMA_PBL		8

/* MSI defines */
#define STMMAC_MSI_VEC_MAX	32

/* PCS status and mask defines */
#define	PCS_ANE_IRQ		BIT(2)	/* PCS Auto-Negotiation */
#define	PCS_LINK_IRQ		BIT(1)	/* PCS Link */
@@ -309,6 +312,24 @@ enum dma_irq_status {
	handle_tx = 0x8,
};

/* Direction filter passed to the per-channel DMA interrupt handler.
 * With multi-vector MSI each ISR services only its own direction's
 * events; the values are bit flags so RXTX is RX | TX.
 */
enum dma_irq_dir {
	DMA_DIR_RX = 0x1,	/* handle RX events only */
	DMA_DIR_TX = 0x2,	/* handle TX events only */
	DMA_DIR_RXTX = 0x3,	/* handle both RX and TX events */
};

/* Identifies which request_irq() call failed during IRQ setup.
 * NOTE(review): presumably used by the driver's error path to unwind
 * already-requested IRQs in reverse order — confirm against the
 * request/free IRQ helpers in stmmac_main.c (not visible here).
 * REQ_IRQ_ERR_NO means no error occurred.
 */
enum request_irq_err {
	REQ_IRQ_ERR_ALL,
	REQ_IRQ_ERR_TX,
	REQ_IRQ_ERR_RX,
	REQ_IRQ_ERR_SFTY_UE,
	REQ_IRQ_ERR_SFTY_CE,
	REQ_IRQ_ERR_LPI,
	REQ_IRQ_ERR_WOL,
	REQ_IRQ_ERR_MAC,
	REQ_IRQ_ERR_NO,	/* success sentinel: nothing failed */
};

/* EEE and LPI defines */
#define	CORE_IRQ_TX_PATH_IN_LPI_MODE	(1 << 0)
#define	CORE_IRQ_TX_PATH_EXIT_LPI_MODE	(1 << 1)
+111 −9
Original line number Diff line number Diff line
@@ -492,6 +492,14 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
	plat->has_crossts = true;
	plat->crosststamp = intel_crosststamp;

	/* Setup MSI vector offset specific to Intel mGbE controller */
	plat->msi_mac_vec = 29;
	plat->msi_lpi_vec = 28;
	plat->msi_sfty_ce_vec = 27;
	plat->msi_sfty_ue_vec = 26;
	plat->msi_rx_base_vec = 0;
	plat->msi_tx_base_vec = 1;

	return 0;
}

@@ -776,6 +784,79 @@ static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};

/* Fall back to a single shared interrupt: allocate exactly one vector
 * of any type (MSI-X, MSI or legacy INTx) and route both the main and
 * the WoL interrupt through it. Clears plat->multi_msi_en so the
 * driver takes the common-interrupt path.
 *
 * Returns 0 on success or the negative errno from
 * pci_alloc_irq_vectors() on failure.
 */
static int stmmac_config_single_msi(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat,
				    struct stmmac_resources *res)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);

	if (nvec < 0) {
		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
			 __func__);
		return nvec;
	}

	/* One vector serves both the MAC and wake-on-LAN events. */
	res->wol_irq = pci_irq_vector(pdev, 0);
	res->irq = res->wol_irq;
	plat->multi_msi_en = 0;
	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
		 __func__);

	return 0;
}

static int stmmac_config_multi_msi(struct pci_dev *pdev,
				   struct plat_stmmacenet_data *plat,
				   struct stmmac_resources *res)
{
	int ret;
	int i;

	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
			 __func__);
		return -1;
	}

	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
			 __func__);
		return ret;
	}

	/* For RX MSI */
	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[i] = pci_irq_vector(pdev,
						plat->msi_rx_base_vec + i * 2);
	}

	/* For TX MSI */
	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[i] = pci_irq_vector(pdev,
						plat->msi_tx_base_vec + i * 2);
	}

	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);

	plat->multi_msi_en = 1;
	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);

	return 0;
}

/**
 * intel_eth_pci_probe
 *
@@ -833,18 +914,24 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;

	/* Initialize all MSI vectors to invalid so that it can be set
	 * according to platform data settings below.
	 * Note: MSI vector takes value from 0 upto 31 (STMMAC_MSI_VEC_MAX)
	 */
	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];
	res.wol_irq = pci_irq_vector(pdev, 0);
	res.irq = pci_irq_vector(pdev, 0);

	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;
@@ -853,13 +940,28 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	ret = stmmac_config_multi_msi(pdev, plat, &res);
	if (ret) {
		ret = stmmac_config_single_msi(pdev, plat, &res);
		if (ret) {
			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
				__func__);
			goto err_alloc_irq;
		}
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret) {
		goto err_dvr_probe;
	}

	return 0;

err_dvr_probe:
	pci_free_irq_vectors(pdev);
err_alloc_irq:
	clk_disable_unprepare(plat->stmmac_clk);
	clk_unregister_fixed_rate(plat->stmmac_clk);
	}

	return ret;
}

+23 −1
Original line number Diff line number Diff line
@@ -239,6 +239,22 @@ static const struct emac_variant emac_variant_h6 = {
#define EMAC_RX_EARLY_INT       BIT(13)
#define EMAC_RGMII_STA_INT      BIT(16)

#define EMAC_INT_MSK_COMMON	EMAC_RGMII_STA_INT
#define EMAC_INT_MSK_TX		(EMAC_TX_INT | \
				 EMAC_TX_DMA_STOP_INT | \
				 EMAC_TX_BUF_UA_INT | \
				 EMAC_TX_TIMEOUT_INT | \
				 EMAC_TX_UNDERFLOW_INT | \
				 EMAC_TX_EARLY_INT |\
				 EMAC_INT_MSK_COMMON)
#define EMAC_INT_MSK_RX		(EMAC_RX_INT | \
				 EMAC_RX_BUF_UA_INT | \
				 EMAC_RX_DMA_STOP_INT | \
				 EMAC_RX_TIMEOUT_INT | \
				 EMAC_RX_OVERFLOW_INT | \
				 EMAC_RX_EARLY_INT | \
				 EMAC_INT_MSK_COMMON)

#define MAC_ADDR_TYPE_DST BIT(31)

/* H3 specific bits for EPHY */
@@ -412,13 +428,19 @@ static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
}

static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
				     struct stmmac_extra_stats *x, u32 chan)
				     struct stmmac_extra_stats *x, u32 chan,
				     u32 dir)
{
	u32 v;
	int ret = 0;

	v = readl(ioaddr + EMAC_INT_STA);

	if (dir == DMA_DIR_RX)
		v &= EMAC_INT_MSK_RX;
	else if (dir == DMA_DIR_TX)
		v &= EMAC_INT_MSK_TX;

	if (v & EMAC_TX_INT) {
		ret |= handle_tx;
		x->tx_normal_irq_n++;
+7 −0
Original line number Diff line number Diff line
@@ -161,6 +161,13 @@ static void dwmac4_dma_init(void __iomem *ioaddr,
		value |= DMA_SYS_BUS_EAME;

	writel(value, ioaddr + DMA_SYS_BUS_MODE);

	if (dma_cfg->multi_msi_en) {
		value = readl(ioaddr + DMA_BUS_MODE);
		value &= ~DMA_BUS_MODE_INTM_MASK;
		value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
		writel(value, ioaddr + DMA_BUS_MODE);
	}
}

static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
+23 −1
Original line number Diff line number Diff line
@@ -25,6 +25,9 @@
#define DMA_TBS_CTRL			0x00001050

/* DMA Bus Mode bitmap */
#define DMA_BUS_MODE_INTM_MASK		GENMASK(17, 16)
#define DMA_BUS_MODE_INTM_SHIFT		16
#define DMA_BUS_MODE_INTM_MODE1		0x1
#define DMA_BUS_MODE_SFT_RESET		BIT(0)

/* DMA SYS Bus Mode bitmap */
@@ -149,6 +152,25 @@
#define DMA_CHAN_STATUS_TPS		BIT(1)
#define DMA_CHAN_STATUS_TI		BIT(0)

#define DMA_CHAN_STATUS_MSK_COMMON	(DMA_CHAN_STATUS_NIS | \
					 DMA_CHAN_STATUS_AIS | \
					 DMA_CHAN_STATUS_CDE | \
					 DMA_CHAN_STATUS_FBE)

#define DMA_CHAN_STATUS_MSK_RX		(DMA_CHAN_STATUS_REB | \
					 DMA_CHAN_STATUS_ERI | \
					 DMA_CHAN_STATUS_RWT | \
					 DMA_CHAN_STATUS_RPS | \
					 DMA_CHAN_STATUS_RBU | \
					 DMA_CHAN_STATUS_RI | \
					 DMA_CHAN_STATUS_MSK_COMMON)

#define DMA_CHAN_STATUS_MSK_TX		(DMA_CHAN_STATUS_ETI | \
					 DMA_CHAN_STATUS_TBU | \
					 DMA_CHAN_STATUS_TPS | \
					 DMA_CHAN_STATUS_TI | \
					 DMA_CHAN_STATUS_MSK_COMMON)

/* Interrupt enable bits per channel */
#define DMA_CHAN_INTR_ENA_NIE		BIT(16)
#define DMA_CHAN_INTR_ENA_AIE		BIT(15)
@@ -206,7 +228,7 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
int dwmac4_dma_interrupt(void __iomem *ioaddr,
			 struct stmmac_extra_stats *x, u32 chan);
			 struct stmmac_extra_stats *x, u32 chan, u32 dir);
void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
Loading