Commit 5f3b8ace authored by Jakub Kicinski

Merge branch 'add-functional-support-for-gigabit-ethernet-driver'

Biju Das says:

====================
Add functional support for Gigabit Ethernet driver

The DMAC and EMAC blocks of Gigabit Ethernet IP found on RZ/G2L SoC are
similar to the R-Car Ethernet AVB IP.

The Gigabit Ethernet IP consists of an Ethernet controller (E-MAC), an
internal TCP/IP offload engine (TOE) and a dedicated direct memory access
controller (DMAC).

With a few changes in the driver we can support both IPs.
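
The split is handled through per-SoC hw_info structures carrying function
pointers for the ring, DMAC and E-MAC helpers, selected at probe time. A
simplified sketch follows (field list heavily trimmed; see struct
ravb_hw_info in the driver for the real definition):

	struct ravb_hw_info {
		void (*rx_ring_free)(struct net_device *ndev, int q);
		void (*rx_ring_format)(struct net_device *ndev, int q);
		void *(*alloc_rx_desc)(struct net_device *ndev, int q);
		bool (*receive)(struct net_device *ndev, int *quota, int q);
		void (*emac_init)(struct net_device *ndev);
		u32 tccr_mask;
		unsigned nc_queues:1;	/* AVB-DMAC has RX and TX NC queues */
		unsigned half_duplex:1;	/* E-MAC supports half duplex mode */
	};

	/* RZ/G2L Gigabit Ethernet IP picks the _gbeth helpers */
	static const struct ravb_hw_info gbeth_hw_info = {
		.rx_ring_free	= ravb_rx_ring_free_gbeth,
		.rx_ring_format	= ravb_rx_ring_format_gbeth,
		.alloc_rx_desc	= ravb_alloc_rx_desc_gbeth,
		.receive	= ravb_rx_gbeth,
		.emac_init	= ravb_emac_init_gbeth,
		.tccr_mask	= TCCR_TSRQ0,
		.half_duplex	= 1,
	};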

This patch series aims to add functional support for the Gigabit Ethernet
driver by filling in all the stubs except set_features.

The set_features patch will be sent as a separate RFC patch along with the
rx_checksum patch, as it needs further discussion related to HW checksumming.
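
For reference, the GbEth set_features hook remains a stub for now, along
these lines (a sketch; the real hook will grow RX checksum handling with
the follow-up RFC):

	static int ravb_set_features_gbeth(struct net_device *ndev,
					   netdev_features_t features)
	{
		/* Place holder */
		return 0;
	}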

With this series, we can boot the kernel with a rootFS mounted over NFS on
RZ/G2L platforms.
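
At runtime the shared NAPI code just dispatches through the per-SoC hook,
so both IPs reuse the same poll loop; simplified:

	/* Common RX entry point called from ravb_poll() */
	static bool ravb_rx(struct net_device *ndev, int *quota, int q)
	{
		struct ravb_private *priv = netdev_priv(ndev);
		const struct ravb_hw_info *info = priv->info;

		return info->receive(ndev, quota, q);
	}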
====================

Link: https://lore.kernel.org/r/20211012163613.30030-1-biju.das.jz@bp.renesas.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 50515cac 94040926
+16 −3
@@ -196,12 +196,15 @@ enum ravb_reg {
	MAHR	= 0x05c0,
	MALR	= 0x05c8,
	TROCR	= 0x0700,	/* R-Car Gen3 and RZ/G2L only */
	CXR41	= 0x0708,	/* RZ/G2L only */
	CXR42	= 0x0710,	/* RZ/G2L only */
	CEFCR	= 0x0740,
	FRECR	= 0x0748,
	TSFRCR	= 0x0750,
	TLFRCR	= 0x0758,
	RFCR	= 0x0760,
	MAFCR	= 0x0778,
	CSR0    = 0x0800,	/* RZ/G2L only */
};


@@ -826,7 +829,7 @@ enum ECSR_BIT {
	ECSR_MPD	= 0x00000002,
	ECSR_LCHNG	= 0x00000004,
	ECSR_PHYI	= 0x00000008,
	ECSR_PFRI	= 0x00000010,
	ECSR_PFRI	= 0x00000010,	/* Documented for R-Car Gen3 and RZ/G2L */
};

/* ECSIPR */
@@ -962,6 +965,11 @@ enum CXR31_BIT {
	CXR31_SEL_LINK1	= 0x00000008,
};

enum CSR0_BIT {
	CSR0_TPE	= 0x00000010,
	CSR0_RPE	= 0x00000020,
};

#define DBAT_ENTRY_NUM	22
#define RX_QUEUE_OFFSET	4
#define NUM_RX_QUEUE	2
@@ -970,6 +978,7 @@ enum CXR31_BIT {
#define RX_BUF_SZ	(2048 - ETH_FCS_LEN + sizeof(__sum16))

#define GBETH_RX_BUFF_MAX 8192
#define GBETH_RX_DESC_DATA_SIZE 4080

struct ravb_tstamp_skb {
	struct list_head list;
@@ -1009,16 +1018,18 @@ struct ravb_hw_info {
	netdev_features_t net_features;
	int stats_len;
	size_t max_rx_len;
	u32 tsrq;
	u32 tccr_mask;
	u32 rx_max_buf_size;
	unsigned aligned_tx: 1;

	/* hardware features */
	unsigned internal_delay:1;	/* AVB-DMAC has internal delays */
	unsigned tx_counters:1;		/* E-MAC has TX counters */
	unsigned carrier_counters:1;	/* E-MAC has carrier counters */
	unsigned multi_irqs:1;		/* AVB-DMAC and E-MAC has multiple irqs */
	unsigned gptp:1;		/* AVB-DMAC has gPTP support */
	unsigned ccc_gac:1;		/* AVB-DMAC has gPTP support active in config mode */
	unsigned nc_queue:1;		/* AVB-DMAC has NC queue */
	unsigned nc_queues:1;		/* AVB-DMAC has RX and TX NC queues */
	unsigned magic_pkt:1;		/* E-MAC supports magic packet detection */
	unsigned half_duplex:1;		/* E-MAC supports half duplex mode */
};
@@ -1037,9 +1048,11 @@ struct ravb_private {
	struct ravb_desc *desc_bat;
	dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
	struct ravb_rx_desc *gbeth_rx_ring;
	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
	void *tx_align[NUM_TX_QUEUE];
	struct sk_buff *rx_1st_skb;
	struct sk_buff **rx_skb[NUM_RX_QUEUE];
	struct sk_buff **tx_skb[NUM_TX_QUEUE];
	u32 rx_over_errors;
+279 −49
@@ -236,10 +236,30 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)

static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
{
	/* Place holder */
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;
	unsigned int i;

	if (!priv->gbeth_rx_ring)
		return;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];

		if (!dma_mapping_error(ndev->dev.parent,
				       le32_to_cpu(desc->dptr)))
			dma_unmap_single(ndev->dev.parent,
					 le32_to_cpu(desc->dptr),
					 GBETH_RX_BUFF_MAX,
					 DMA_FROM_DEVICE);
	}
	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
	dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
			  priv->rx_desc_dma[q]);
	priv->gbeth_rx_ring = NULL;
}

static void ravb_rx_ring_free(struct net_device *ndev, int q)
static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;
@@ -307,10 +327,36 @@ static void ravb_ring_free(struct net_device *ndev, int q)

static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
{
	/* Place holder */
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_rx_desc *rx_desc;
	unsigned int rx_ring_size;
	dma_addr_t dma_addr;
	unsigned int i;

	rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	memset(priv->gbeth_rx_ring, 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->gbeth_rx_ring[i];
		rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
					  GBETH_RX_BUFF_MAX,
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
	rx_desc = &priv->gbeth_rx_ring[i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */
}

static void ravb_rx_ring_format(struct net_device *ndev, int q)
static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_ex_rx_desc *rx_desc;
@@ -385,11 +431,18 @@ static void ravb_ring_format(struct net_device *ndev, int q)

static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
{
	/* Place holder */
	return NULL;
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;

	ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);

	priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
						 &priv->rx_desc_dma[q],
						 GFP_KERNEL);
	return priv->gbeth_rx_ring;
}

static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned int ring_size;
@@ -466,10 +519,10 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
	/* Receive frame limit set register */
	ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);

	/* PAUSE prohibition */
	/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
	ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
			 ECMR_TE | ECMR_RE | ECMR_RCPT |
			 ECMR_TXF | ECMR_RXF | ECMR_PRM, ECMR);
			 ECMR_TXF | ECMR_RXF, ECMR);

	ravb_set_rate_gbeth(ndev);

@@ -481,12 +534,12 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
	ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);

	ravb_modify(ndev, CXR31, CXR31_SEL_LINK1, 0);
	ravb_modify(ndev, CXR31, CXR31_SEL_LINK0, CXR31_SEL_LINK0);
	ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
}

static void ravb_emac_init_rcar(struct net_device *ndev)
@@ -535,7 +588,7 @@ static int ravb_dmac_init_gbeth(struct net_device *ndev)
	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);

	/* Set AVB RX */
	/* Set DMAC RX */
	ravb_write(ndev, 0x60000000, RCR);

	/* Set Max Frame Length (RTC) */
@@ -681,15 +734,151 @@ static void ravb_rx_csum(struct sk_buff *skb)
	skb_trim(skb, skb->len - sizeof(__sum16));
}

static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
					  struct ravb_rx_desc *desc)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;

	skb = priv->rx_skb[RAVB_BE][entry];
	priv->rx_skb[RAVB_BE][entry] = NULL;
	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
			 ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);

	return skb;
}

/* Packet receive function for Gigabit Ethernet */
static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
{
	/* Place holder */
	return true;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	struct net_device_stats *stats;
	struct ravb_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u8  desc_status;
	int boguscnt;
	u16 pkt_len;
	u8  die_dt;
	int entry;
	int limit;

	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
	stats = &priv->stats[q];

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	desc = &priv->gbeth_rx_ring[entry];
	while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		if (--boguscnt < 0)
			break;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			die_dt = desc->die_dt & 0xF0;
			switch (die_dt) {
			case DT_FSINGLE:
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_put(skb, pkt_len);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(&priv->napi[q], skb);
				stats->rx_packets++;
				stats->rx_bytes += pkt_len;
				break;
			case DT_FSTART:
				priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_put(priv->rx_1st_skb, pkt_len);
				break;
			case DT_FMID:
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
							       priv->rx_1st_skb->len,
							       skb->data,
							       pkt_len);
				skb_put(priv->rx_1st_skb, pkt_len);
				dev_kfree_skb(skb);
				break;
			case DT_FEND:
				skb = ravb_get_skb_gbeth(ndev, entry, desc);
				skb_copy_to_linear_data_offset(priv->rx_1st_skb,
							       priv->rx_1st_skb->len,
							       skb->data,
							       pkt_len);
				skb_put(priv->rx_1st_skb, pkt_len);
				dev_kfree_skb(skb);
				priv->rx_1st_skb->protocol =
					eth_type_trans(priv->rx_1st_skb, ndev);
				napi_gro_receive(&priv->napi[q],
						 priv->rx_1st_skb);
				stats->rx_packets++;
				stats->rx_bytes += priv->rx_1st_skb->len;
				break;
			}
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->gbeth_rx_ring[entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->gbeth_rx_ring[entry];
		desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev, info->max_rx_len);
			if (!skb)
				break;
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(ndev->dev.parent,
						  skb->data,
						  GBETH_RX_BUFF_MAX,
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA  from happening...
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q)
static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
@@ -832,7 +1021,7 @@ static int ravb_stop_dma(struct net_device *ndev)
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR, info->tsrq, 0);
	error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);

	if (error)
		return error;
@@ -988,7 +1177,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		if (info->nc_queue) {
		if (info->nc_queues) {
			for (q = RAVB_NC; q >= RAVB_BE; q--) {
				if (ravb_queue_interrupt(ndev, q))
					result = IRQ_HANDLED;
@@ -1084,16 +1273,25 @@ static int ravb_poll(struct napi_struct *napi, int budget)
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	const struct ravb_hw_info *info = priv->info;
	bool gptp = info->gptp || info->ccc_gac;
	struct ravb_rx_desc *desc;
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int quota = budget;
	unsigned int entry;

	if (!gptp) {
		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
		desc = &priv->gbeth_rx_ring[entry];
	}
	/* Processing RX Descriptor Ring */
	/* Clear RX interrupt */
	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
	if (gptp || desc->die_dt != DT_FEMPTY) {
		if (ravb_rx(ndev, &quota, q))
			goto out;
	}

	/* Processing TX Descriptor Ring */
	spin_lock_irqsave(&priv->lock, flags);
@@ -1118,7 +1316,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)

	/* Receive error message handling */
	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
	if (info->nc_queue)
	if (info->nc_queues)
		priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
		ndev->stats.rx_over_errors = priv->rx_over_errors;
@@ -1295,6 +1493,24 @@ static void ravb_set_msglevel(struct net_device *ndev, u32 value)
	priv->msg_enable = value;
}

static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_csum_offload_errors",
	"rx_queue_0_over_errors",
};

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
@@ -1351,7 +1567,7 @@ static void ravb_get_ethtool_stats(struct net_device *ndev,
	int i = 0;
	int q;

	num_rx_q = info->nc_queue ? NUM_RX_QUEUE : 1;
	num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
	/* Device-specific stats */
	for (q = RAVB_BE; q < num_rx_q; q++) {
		struct net_device_stats *stats = &priv->stats[q];
@@ -1428,7 +1644,7 @@ static int ravb_set_ringparam(struct net_device *ndev,

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		if (info->nc_queue)
		if (info->nc_queues)
			ravb_ring_free(ndev, RAVB_NC);
	}

@@ -1548,7 +1764,7 @@ static int ravb_open(struct net_device *ndev)
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	if (info->nc_queue)
	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);

	if (!info->multi_irqs) {
@@ -1623,7 +1839,7 @@ static int ravb_open(struct net_device *ndev)
out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	if (info->nc_queue)
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	return error;
@@ -1673,7 +1889,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
	}

	ravb_ring_free(ndev, RAVB_BE);
	if (info->nc_queue)
	if (info->nc_queues)
		ravb_ring_free(ndev, RAVB_NC);

	/* Device init */
@@ -1855,6 +2071,13 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
		ravb_write(ndev, 0, TROCR);	/* (write clear) */
	}

	if (info->carrier_counters) {
		nstats->collisions += ravb_read(ndev, CXR41);
		ravb_write(ndev, 0, CXR41);	/* (write clear) */
		nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
		ravb_write(ndev, 0, CXR42);	/* (write clear) */
	}

	nstats->rx_packets = stats0->rx_packets;
	nstats->tx_packets = stats0->tx_packets;
	nstats->rx_bytes = stats0->rx_bytes;
@@ -1866,7 +2089,7 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
	nstats->rx_length_errors = stats0->rx_length_errors;
	nstats->rx_missed_errors = stats0->rx_missed_errors;
	nstats->rx_over_errors = stats0->rx_over_errors;
	if (info->nc_queue) {
	if (info->nc_queues) {
		stats1 = &priv->stats[RAVB_NC];

		nstats->rx_packets += stats1->rx_packets;
@@ -1947,13 +2170,13 @@ static int ravb_close(struct net_device *ndev)
	}
	free_irq(ndev->irq, ndev);

	if (info->nc_queue)
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);

	/* Free all the skb's in the RX queue and the DMA buffers. */
	ravb_ring_free(ndev, RAVB_BE);
	if (info->nc_queue)
	if (info->nc_queues)
		ravb_ring_free(ndev, RAVB_NC);

	return 0;
@@ -2173,10 +2396,10 @@ static int ravb_mdio_release(struct ravb_private *priv)
}

static const struct ravb_hw_info ravb_gen3_hw_info = {
	.rx_ring_free = ravb_rx_ring_free,
	.rx_ring_format = ravb_rx_ring_format,
	.alloc_rx_desc = ravb_alloc_rx_desc,
	.receive = ravb_rcar_rx,
	.rx_ring_free = ravb_rx_ring_free_rcar,
	.rx_ring_format = ravb_rx_ring_format_rcar,
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
@@ -2187,20 +2410,21 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
	.tsrq = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_buf_size = SZ_2K,
	.internal_delay = 1,
	.tx_counters = 1,
	.multi_irqs = 1,
	.ccc_gac = 1,
	.nc_queue = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};

static const struct ravb_hw_info ravb_gen2_hw_info = {
	.rx_ring_free = ravb_rx_ring_free,
	.rx_ring_format = ravb_rx_ring_format,
	.alloc_rx_desc = ravb_alloc_rx_desc,
	.receive = ravb_rcar_rx,
	.rx_ring_free = ravb_rx_ring_free_rcar,
	.rx_ring_format = ravb_rx_ring_format_rcar,
	.alloc_rx_desc = ravb_alloc_rx_desc_rcar,
	.receive = ravb_rx_rcar,
	.set_rate = ravb_set_rate_rcar,
	.set_feature = ravb_set_features_rcar,
	.dmac_init = ravb_dmac_init_rcar,
@@ -2211,10 +2435,11 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
	.net_features = NETIF_F_RXCSUM,
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
	.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
	.tsrq = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
	.rx_max_buf_size = SZ_2K,
	.aligned_tx = 1,
	.gptp = 1,
	.nc_queue = 1,
	.nc_queues = 1,
	.magic_pkt = 1,
};

@@ -2227,10 +2452,15 @@ static const struct ravb_hw_info gbeth_hw_info = {
	.set_feature = ravb_set_features_gbeth,
	.dmac_init = ravb_dmac_init_gbeth,
	.emac_init = ravb_emac_init_gbeth,
	.max_rx_len = GBETH_RX_BUFF_MAX + RAVB_ALIGN - 1,
	.tsrq = TCCR_TSRQ0,
	.gstrings_stats = ravb_gstrings_stats_gbeth,
	.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
	.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
	.max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
	.tccr_mask = TCCR_TSRQ0,
	.rx_max_buf_size = SZ_8K,
	.aligned_tx = 1,
	.tx_counters = 1,
	.carrier_counters = 1,
	.half_duplex = 1,
};

@@ -2389,7 +2619,7 @@ static int ravb_probe(struct platform_device *pdev)
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	if (info->nc_queue) {
	if (info->nc_queues) {
		priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
		priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	}
@@ -2452,7 +2682,7 @@ static int ravb_probe(struct platform_device *pdev)
	}
	clk_prepare_enable(priv->refclk);

	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	ndev->min_mtu = ETH_MIN_MTU;

	/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer
@@ -2525,7 +2755,7 @@ static int ravb_probe(struct platform_device *pdev)
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
	if (info->nc_queue)
	if (info->nc_queues)
		netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

	/* Network device register */
@@ -2544,7 +2774,7 @@ static int ravb_probe(struct platform_device *pdev)
	return 0;

out_napi_del:
	if (info->nc_queue)
	if (info->nc_queues)
		netif_napi_del(&priv->napi[RAVB_NC]);

	netif_napi_del(&priv->napi[RAVB_BE]);
@@ -2585,7 +2815,7 @@ static int ravb_remove(struct platform_device *pdev)
	ravb_write(ndev, CCC_OPC_RESET, CCC);
	pm_runtime_put_sync(&pdev->dev);
	unregister_netdev(ndev);
	if (info->nc_queue)
	if (info->nc_queues)
		netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
@@ -2609,7 +2839,7 @@ static int ravb_wol_setup(struct net_device *ndev)

	/* Only allow ECI interrupts */
	synchronize_irq(priv->emac_irq);
	if (info->nc_queue)
	if (info->nc_queues)
		napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
@@ -2626,7 +2856,7 @@ static int ravb_wol_restore(struct net_device *ndev)
	const struct ravb_hw_info *info = priv->info;
	int ret;

	if (info->nc_queue)
	if (info->nc_queues)
		napi_enable(&priv->napi[RAVB_NC]);
	napi_enable(&priv->napi[RAVB_BE]);