Commit e4c437cd authored by Paolo Abeni's avatar Paolo Abeni
Browse files

Merge branch 'vmxnet3-upgrade-to-version-7'

Ronak Doshi says:

====================
vmxnet3: upgrade to version 7

vmxnet3 emulation has recently added several new features including
support for uniform passthrough (UPT). To make UPT work, vmxnet3 has
to be enhanced as per the new specification. This patch series
extends the vmxnet3 driver to leverage these new features.

Compatibility is maintained using the existing vmxnet3 versioning mechanism as
follows:
 - new features added to vmxnet3 emulation are associated with new vmxnet3
   version viz. vmxnet3 version 7.
 - emulation advertises all the versions it supports to the driver.
 - during initialization, vmxnet3 driver picks the highest version number
 supported by both the emulation and the driver and configures emulation
 to run at that version.

In particular, the following changes are introduced:

Patch 1:
  This patch introduces utility macros for vmxnet3 version 7 comparison
  and updates Copyright information.

Patch 2:
  This patch adds new capability registers to fine control enablement of
  individual features based on emulation and passthrough.

Patch 3:
  This patch adds support for large passthrough BAR register.

Patch 4:
  This patch adds support for out of order rx completion processing.

Patch 5:
  This patch introduces new command to set ring buffer sizes to pass this
  information to the hardware.

Patch 6:
  For better performance, hardware has a requirement to limit number of TSO
  descriptors. This patch adds that support.

Patch 7:
  With vmxnet3 version 7, new descriptor fields are used to indicate
  encapsulation offload.

Patch 8:
  With all vmxnet3 version 7 changes incorporated in the vmxnet3 driver,
  with this patch, the driver can configure emulation to run at vmxnet3
  version 7.

Changes in v2->v3:
 - use correct byte ordering for ringBufSize

Changes in v2:
 - use local rss_fields variable for the rss capability checks in patch 2
====================

Link: https://lore.kernel.org/r/20220608032353.964-1-doshir@vmware.com


Signed-off-by: default avatarPaolo Abeni <pabeni@redhat.com>
parents 5834e72e acc38e04
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
# Copyright (C) 2007-2021, VMware, Inc. All Rights Reserved.
# Copyright (C) 2007-2022, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
+1 −1
Original line number Diff line number Diff line
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
+70 −10
Original line number Diff line number Diff line
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
@@ -40,7 +40,13 @@ enum {
	VMXNET3_REG_MACL	= 0x28,	/* MAC Address Low */
	VMXNET3_REG_MACH	= 0x30,	/* MAC Address High */
	VMXNET3_REG_ICR		= 0x38,	/* Interrupt Cause Register */
	VMXNET3_REG_ECR		= 0x40	/* Event Cause Register */
	VMXNET3_REG_ECR		= 0x40, /* Event Cause Register */
	VMXNET3_REG_DCR         = 0x48, /* Device capability register,
					 * from 0x48 to 0x80
					 */
	VMXNET3_REG_PTCR        = 0x88, /* Passthru capability register
					 * from 0x88 to 0xb0
					 */
};

/* BAR 0 */
@@ -51,8 +57,18 @@ enum {
	VMXNET3_REG_RXPROD2	= 0xA00	 /* Rx Producer Index for ring 2 */
};

/* For the large passthrough (PT) BAR, the doorbell (DB) registers sit at
 * the following offsets instead of the classic BAR 0 offsets above
 * (selected at probe time when VMXNET3_CAP_LARGE_BAR is granted).
 */
enum {
	VMXNET3_REG_LB_TXPROD   = 0x1000, /* Tx Producer Index */
	VMXNET3_REG_LB_RXPROD   = 0x1400, /* Rx Producer Index for ring 1 */
	VMXNET3_REG_LB_RXPROD2  = 0x1800, /* Rx Producer Index for ring 2 */
};

#define VMXNET3_PT_REG_SIZE         4096		/* BAR 0 */
#define VMXNET3_LARGE_PT_REG_SIZE   8192		/* large PT pages */
#define VMXNET3_VD_REG_SIZE         4096		/* BAR 1 */
#define VMXNET3_LARGE_BAR0_REG_SIZE (4096 * 4096)	/* LARGE BAR 0 */
#define VMXNET3_OOB_REG_SIZE        (4094 * 4096)	/* OOB pages */

#define VMXNET3_REG_ALIGN       8	/* All registers are 8-byte aligned. */
#define VMXNET3_REG_ALIGN_MASK  0x7
@@ -83,6 +99,9 @@ enum {
	VMXNET3_CMD_SET_COALESCE,
	VMXNET3_CMD_REGISTER_MEMREGS,
	VMXNET3_CMD_SET_RSS_FIELDS,
	VMXNET3_CMD_RESERVED4,
	VMXNET3_CMD_RESERVED5,
	VMXNET3_CMD_SET_RING_BUFFER_SIZE,

	VMXNET3_CMD_FIRST_GET = 0xF00D0000,
	VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -101,6 +120,9 @@ enum {
	VMXNET3_CMD_GET_RESERVED2,
	VMXNET3_CMD_GET_RESERVED3,
	VMXNET3_CMD_GET_MAX_QUEUES_CONF,
	VMXNET3_CMD_GET_RESERVED4,
	VMXNET3_CMD_GET_MAX_CAPABILITIES,
	VMXNET3_CMD_GET_DCR0_REG,
};

/*
@@ -126,17 +148,17 @@ struct Vmxnet3_TxDesc {

#ifdef __BIG_ENDIAN_BITFIELD
	u32 msscof:14;  /* MSS, checksum offset, flags */
	u32 ext1:1;
	u32 ext1:1;     /* set to 1 to indicate inner csum/tso, vmxnet3 v7 */
	u32 dtype:1;    /* descriptor type */
	u32 oco:1;
	u32 oco:1;      /* Outer csum offload */
	u32 gen:1;      /* generation bit */
	u32 len:14;
#else
	u32 len:14;
	u32 gen:1;      /* generation bit */
	u32 oco:1;
	u32 oco:1;      /* Outer csum offload */
	u32 dtype:1;    /* descriptor type */
	u32 ext1:1;
	u32 ext1:1;     /* set to 1 to indicate inner csum/tso, vmxnet3 v7 */
	u32 msscof:14;  /* MSS, checksum offset, flags */
#endif  /* __BIG_ENDIAN_BITFIELD */

@@ -240,11 +262,13 @@ struct Vmxnet3_RxCompDesc {
	u32		rqID:10;      /* rx queue/ring ID */
	u32		sop:1;        /* Start of Packet */
	u32		eop:1;        /* End of Packet */
	u32		ext1:2;
	u32		ext1:2;       /* bit 0: indicating v4/v6/.. is for inner header */
				      /* bit 1: indicating rssType is based on inner header */
	u32		rxdIdx:12;    /* Index of the RxDesc */
#else
	u32		rxdIdx:12;    /* Index of the RxDesc */
	u32		ext1:2;
	u32		ext1:2;       /* bit 0: indicating v4/v6/.. is for inner header */
				      /* bit 1: indicating rssType is based on inner header */
	u32		eop:1;        /* End of Packet */
	u32		sop:1;        /* Start of Packet */
	u32		rqID:10;      /* rx queue/ring ID */
@@ -378,6 +402,8 @@ union Vmxnet3_GenericDesc {

/* max # of tx descs for a non-tso pkt */
#define VMXNET3_MAX_TXD_PER_PKT 16
/* max # of tx descs for a tso pkt */
#define VMXNET3_MAX_TSO_TXD_PER_PKT 24

/* Max size of a single rx buffer */
#define VMXNET3_MAX_RX_BUF_SIZE  ((1 << 14) - 1)
@@ -724,6 +750,13 @@ enum Vmxnet3_RSSField {
	VMXNET3_RSS_FIELDS_ESPIP6 = 0x0020,
};

/* Rx ring buffer sizes handed to the device with the
 * VMXNET3_CMD_SET_RING_BUFFER_SIZE command.  All fields are
 * little-endian on the wire (callers fill them with cpu_to_le16).
 */
struct Vmxnet3_RingBufferSize {
	__le16             ring1BufSizeType0; /* ring 1, type-0 buffers */
	__le16             ring1BufSizeType1; /* ring 1, type-1 buffers */
	__le16             ring2BufSizeType1; /* ring 2, type-1 buffers */
	__le16             pad;               /* pads the struct to 8 bytes */
};

/* If the command data <= 16 bytes, use the shared memory directly.
 * otherwise, use variable length configuration descriptor.
 */
@@ -731,6 +764,7 @@ union Vmxnet3_CmdInfo {
	struct Vmxnet3_VariableLenConfDesc	varConf;
	struct Vmxnet3_SetPolling		setPolling;
	enum   Vmxnet3_RSSField                 setRssFields;
	struct Vmxnet3_RingBufferSize           ringBufSize;
	__le64					data[2];
};

@@ -801,4 +835,30 @@ struct Vmxnet3_DriverShared {
#define VMXNET3_LINK_UP         (10000 << 16 | 1)    /* 10 Gbps, up */
#define VMXNET3_LINK_DOWN       0

#define VMXNET3_DCR_ERROR                          31   /* error when bit 31 of DCR is set */
#define VMXNET3_CAP_UDP_RSS                        0    /* bit 0 of DCR 0 */
#define VMXNET3_CAP_ESP_RSS_IPV4                   1    /* bit 1 of DCR 0 */
#define VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD        2    /* bit 2 of DCR 0 */
#define VMXNET3_CAP_GENEVE_TSO                     3    /* bit 3 of DCR 0 */
#define VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD         4    /* bit 4 of DCR 0 */
#define VMXNET3_CAP_VXLAN_TSO                      5    /* bit 5 of DCR 0 */
#define VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD  6    /* bit 6 of DCR 0 */
#define VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD   7    /* bit 7 of DCR 0 */
#define VMXNET3_CAP_PKT_STEERING_IPV4              8    /* bit 8 of DCR 0 */
#define VMXNET3_CAP_VERSION_4_MAX                  VMXNET3_CAP_PKT_STEERING_IPV4
#define VMXNET3_CAP_ESP_RSS_IPV6                   9    /* bit 9 of DCR 0 */
#define VMXNET3_CAP_VERSION_5_MAX                  VMXNET3_CAP_ESP_RSS_IPV6
#define VMXNET3_CAP_ESP_OVER_UDP_RSS               10   /* bit 10 of DCR 0 */
#define VMXNET3_CAP_INNER_RSS                      11   /* bit 11 of DCR 0 */
#define VMXNET3_CAP_INNER_ESP_RSS                  12   /* bit 12 of DCR 0 */
#define VMXNET3_CAP_CRC32_HASH_FUNC                13   /* bit 13 of DCR 0 */
#define VMXNET3_CAP_VERSION_6_MAX                  VMXNET3_CAP_CRC32_HASH_FUNC
#define VMXNET3_CAP_OAM_FILTER                     14   /* bit 14 of DCR 0 */
#define VMXNET3_CAP_ESP_QS                         15   /* bit 15 of DCR 0 */
#define VMXNET3_CAP_LARGE_BAR                      16   /* bit 16 of DCR 0 */
#define VMXNET3_CAP_OOORX_COMP                     17   /* bit 17 of DCR 0 */
#define VMXNET3_CAP_VERSION_7_MAX                  18
/* when new capability is introduced, update VMXNET3_CAP_MAX */
#define VMXNET3_CAP_MAX                            VMXNET3_CAP_VERSION_7_MAX

#endif /* _VMXNET3_DEFS_H_ */
+258 −33
Original line number Diff line number Diff line
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
 * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
@@ -130,6 +130,20 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}

/* Check if capability is supported by UPT device or
 * UPT is even requested.
 *
 * Returns true when bit @cap is set in @cap_supported, or when
 * VMXNET3_DCR_ERROR (bit 31) is set -- the latter indicates the
 * passthrough capability read reported an error; callers then treat
 * the capability as available (presumably falling back to emulation;
 * confirm against the PTCR read in probe).
 */
bool
vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
{
	/* Single mask test is equivalent to the two OR'ed bit checks. */
	return (cap_supported &
		((1UL << VMXNET3_DCR_ERROR) | (1UL << cap))) != 0;
}


/*
 * Check the link state. This may start or stop the tx queue.
@@ -571,6 +585,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;
		rbi->comp_state = VMXNET3_RXD_COMP_PENDING;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
@@ -630,8 +645,10 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
		if (num_allocated == num_to_alloc) {
			rbi->comp_state = VMXNET3_RXD_COMP_DONE;
			break;
		}

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
@@ -1044,6 +1061,23 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			}
			tq->stats.copy_skb_header++;
		}
		if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
			/* tso pkts must not use more than
			 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
			if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
		}
		if (skb->encapsulation) {
			vmxnet3_prepare_inner_tso(skb, &ctx);
		} else {
@@ -1127,7 +1161,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
	if (ctx.mss) {
		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
			if (VMXNET3_VERSION_GE_7(adapter)) {
				gdesc->txd.om = VMXNET3_OM_TSO;
				gdesc->txd.ext1 = 1;
			} else {
				gdesc->txd.om = VMXNET3_OM_ENCAP;
			}
			gdesc->txd.msscof = ctx.mss;

			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
@@ -1144,8 +1183,15 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			    skb->encapsulation) {
				gdesc->txd.hlen = ctx.l4_offset +
						  ctx.l4_hdr_size;
				if (VMXNET3_VERSION_GE_7(adapter)) {
					gdesc->txd.om = VMXNET3_OM_CSUM;
					gdesc->txd.msscof = ctx.l4_offset +
							    skb->csum_offset;
					gdesc->txd.ext1 = 1;
				} else {
					gdesc->txd.om = VMXNET3_OM_ENCAP;
					gdesc->txd.msscof = 0;		/* Reserved */
				}
			} else {
				gdesc->txd.hlen = ctx.l4_offset;
				gdesc->txd.om = VMXNET3_OM_CSUM;
@@ -1193,7 +1239,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       adapter->tx_prod_offset + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

@@ -1345,14 +1391,15 @@ static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	u32 rxprod_reg[2] = {
		adapter->rx_prod_offset, adapter->rx_prod2_offset
	};
	u32 num_pkts = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
	u16 segCnt = 0, mss = 0;
	int comp_offset, fill_offset;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
@@ -1625,9 +1672,15 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,

rcd_done:
		/* device may have skipped some rx descs */
		ring = rq->rx_ring + ring_idx;
		rbi->comp_state = VMXNET3_RXD_COMP_DONE;

		comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
		fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
			      idx - ring->next2fill - 1;
		if (!ring->isOutOfOrder || fill_offset >= comp_offset)
			ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;

		/* Ensure that the writes to rxd->gen bits will be observed
		 * after all other writes to rxd objects.
@@ -1635,18 +1688,38 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		dma_wmb();

		while (num_to_alloc) {
			rbi = rq->buf_info[ring_idx] + ring->next2fill;
			if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
				goto refill_buf;
			if (ring_idx == 0) {
				/* ring0 Type1 buffers can get skipped; re-fill them */
				if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
					goto refill_buf;
			}
			if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
refill_buf:
				vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
						  &rxCmdDesc);
			BUG_ON(!rxd->addr);
				WARN_ON(!rxd->addr);

				/* Recv desc is ready to be used by the device */
				rxd->gen = ring->gen;
				vmxnet3_cmd_ring_adv_next2fill(ring);
				rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
				num_to_alloc--;
			} else {
				/* rx completion hasn't occurred */
				ring->isOutOfOrder = 1;
				break;
			}
		}

		if (num_to_alloc == 0) {
			ring->isOutOfOrder = 0;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
		if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
@@ -1810,6 +1883,7 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[i].isOutOfOrder = 0;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
@@ -2000,8 +2074,17 @@ vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		struct Vmxnet3_RxCompDesc *rcd;
#ifdef __BIG_ENDIAN_BITFIELD
		struct Vmxnet3_RxCompDesc rxComp;
#endif
		napi_complete_done(napi, rxd_done);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
		/* after unmasking the interrupt, check if any descriptors were completed */
		vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
				  &rxComp);
		if (rcd->gen == rq->comp_ring.gen && napi_reschedule(napi))
			vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}
@@ -2626,6 +2709,23 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
	/* the rest are already zeroed */
}

/* Pass the driver-chosen rx ring buffer sizes (adapter->ringBufSize,
 * filled in vmxnet3_adjust_rx_ring_size) to the device via the
 * VMXNET3_CMD_SET_RING_BUFFER_SIZE command.  No-op before vmxnet3
 * version 7, where the command does not exist.
 */
static void
vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
	unsigned long flags;

	if (!VMXNET3_VERSION_GE_7(adapter))
		return;

	/* ringBufSize fields are already little-endian (cpu_to_le16 at
	 * fill time), so a plain struct copy is correct here.
	 * NOTE(review): cmdInfo is written outside cmd_lock while only
	 * the command-register write is inside it -- presumably fine
	 * since this runs during device activation; verify against the
	 * locking used by other cmdInfo users.
	 */
	cmdInfo->ringBufSize = adapter->ringBufSize;
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_SET_RING_BUFFER_SIZE);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}

static void
vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
{
@@ -2671,6 +2771,36 @@ vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
		adapter->rss_fields =
			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	} else {
		if (VMXNET3_VERSION_GE_7(adapter)) {
			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
			     adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
						       VMXNET3_CAP_UDP_RSS)) {
				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
			} else {
				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
			}

			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
						       VMXNET3_CAP_ESP_RSS_IPV4)) {
				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
			} else {
				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
			}

			if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
			    vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
						       VMXNET3_CAP_ESP_RSS_IPV6)) {
				adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
			} else {
				adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
			}

			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
			adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		}
		cmdInfo->setRssFields = adapter->rss_fields;
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_SET_RSS_FIELDS);
@@ -2734,14 +2864,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
		goto activate_err;
	}

	vmxnet3_init_bufsize(adapter);
	vmxnet3_init_coalesce(adapter);
	vmxnet3_init_rssfields(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
		VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}
@@ -2907,6 +3038,8 @@ static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	/* With version7 ring1 will have only T0 buffers */
	if (!VMXNET3_VERSION_GE_7(adapter)) {
		if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
					    VMXNET3_MAX_ETH_HDR_SIZE) {
			adapter->skb_buf_size = adapter->netdev->mtu +
@@ -2921,6 +3054,14 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
						    VMXNET3_MAX_ETH_HDR_SIZE;
			adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
		}
	} else {
		adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
					    VMXNET3_MAX_SKB_BUF_SIZE);
		adapter->rx_buf_per_pkt = 1;
		adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
		adapter->ringBufSize.ring1BufSizeType1 = 0;
		adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
@@ -2935,6 +3076,11 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
	ring1_size = (ring1_size + sz - 1) / sz * sz;
	ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
			   sz * sz);
	/* For v7 and later, keep ring size power of 2 for UPT */
	if (VMXNET3_VERSION_GE_7(adapter)) {
		ring0_size = rounddown_pow_of_two(ring0_size);
		ring1_size = rounddown_pow_of_two(ring1_size);
	}
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -3185,6 +3331,47 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
			NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (VMXNET3_VERSION_GE_7(adapter)) {
		unsigned long flags;

		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_GENEVE_TSO)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_VXLAN_TSO)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
		}
		if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
					       VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
			adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
		}

		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
		    !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
			netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
			netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	}

	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_CTAG_TX |
				  NETIF_F_HW_VLAN_CTAG_RX);
@@ -3472,7 +3659,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
		goto err_alloc_pci;

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & (1 << VMXNET3_REV_6)) {
	if (ver & (1 << VMXNET3_REV_7)) {
		VMXNET3_WRITE_BAR1_REG(adapter,
				       VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_7);
		adapter->version = VMXNET3_REV_7 + 1;
	} else if (ver & (1 << VMXNET3_REV_6)) {
		VMXNET3_WRITE_BAR1_REG(adapter,
				       VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_6);
@@ -3520,6 +3712,39 @@ vmxnet3_probe_device(struct pci_dev *pdev,
		goto err_ver;
	}

	if (VMXNET3_VERSION_GE_7(adapter)) {
		adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
		adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
		if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
			adapter->dev_caps[0] = adapter->devcap_supported[0] &
							(1UL << VMXNET3_CAP_LARGE_BAR);
		}
		if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
		    adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
		    adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
			adapter->dev_caps[0] |= adapter->devcap_supported[0] &
						(1UL << VMXNET3_CAP_OOORX_COMP);
		}
		if (adapter->dev_caps[0])
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);

		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
		adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	if (VMXNET3_VERSION_GE_7(adapter) &&
	    adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
		adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
		adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
		adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
	} else {
		adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
		adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
		adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
	}

	if (VMXNET3_VERSION_GE_6(adapter)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+107 −9

File changed.

Preview size limit exceeded, changes collapsed.

Loading