Commit 79cdf17e authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'ionic-on-chip-desc'



Shannon Nelson says:

====================
ionic: on-chip descriptors

We start with a couple of house-keeping patches that were originally
presented for 'net', then we add support for on-chip descriptor rings
for tx-push, as well as support for rx-push.

I have a patch for the ethtool userland utility that I can send out
once this has been accepted.

v4: added rx-push attributes to ethtool netlink
    converted CMB feature from using a priv-flag to using ethtool tx/rx-push

v3: edited commit message to describe interface-down limitation
    added warn msg if cmb_inuse alloc fails
    removed unnecessary clearing of phy_cmb_pages and cmb_npages
    changed cmb_rings_toggle to use cmb_inuse
    removed unrelated pci_set_drvdata()
    removed unnecessary (u32) cast
    added static inline func for writing CMB descriptors

v2: dropped the rx buffers patch
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 8024edf3 40bc471d
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -874,6 +874,7 @@ Kernel response contents:
  ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``    u8      TCP header / data split
  ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
  ``ETHTOOL_A_RINGS_TX_PUSH``           u8      flag of TX Push mode
  ``ETHTOOL_A_RINGS_RX_PUSH``           u8      flag of RX Push mode
  ====================================  ======  ===========================

``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -883,8 +884,8 @@ separate buffers. The device configuration must make it possible to receive
full memory pages of data, for example because MTU is high enough or through
HW-GRO.

``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable descriptor fast
path to send packets. In ordinary path, driver fills descriptors in DRAM and
``ETHTOOL_A_RINGS_[RX|TX]_PUSH`` flag is used to enable the descriptor fast
path to send or receive packets. In the ordinary path, the driver fills descriptors in DRAM and
notifies NIC hardware. In fast path, driver pushes descriptors to the device
through MMIO writes, thus reducing the latency. However, enabling this feature
may increase the CPU cost. Drivers may enforce additional per-packet
@@ -906,6 +907,7 @@ Request contents:
  ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32     size of buffers on the ring
  ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
  ``ETHTOOL_A_RINGS_TX_PUSH``           u8      flag of TX Push mode
  ``ETHTOOL_A_RINGS_RX_PUSH``           u8      flag of RX Push mode
  ====================================  ======  ===========================

Kernel checks that requested ring sizes do not exceed limits reported by
+4 −2
Original line number Diff line number Diff line
@@ -121,7 +121,7 @@ static void ionic_vf_dealloc_locked(struct ionic *ionic)

		if (v->stats_pa) {
			vfc.stats_pa = 0;
			(void)ionic_set_vf_config(ionic, i, &vfc);
			ionic_set_vf_config(ionic, i, &vfc);
			dma_unmap_single(ionic->dev, v->stats_pa,
					 sizeof(v->stats), DMA_FROM_DEVICE);
			v->stats_pa = 0;
@@ -169,7 +169,7 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)

		/* ignore failures from older FW, we just won't get stats */
		vfc.stats_pa = cpu_to_le64(v->stats_pa);
		(void)ionic_set_vf_config(ionic, i, &vfc);
		ionic_set_vf_config(ionic, i, &vfc);
	}

out:
@@ -352,6 +352,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_reset:
	ionic_reset(ionic);
err_out_teardown:
	ionic_dev_teardown(ionic);
	pci_clear_master(pdev);
	/* Don't fail the probe for these errors, keep
	 * the hw interface around for inspection
@@ -390,6 +391,7 @@ static void ionic_remove(struct pci_dev *pdev)

	ionic_port_reset(ionic);
	ionic_reset(ionic);
	ionic_dev_teardown(ionic);
	pci_clear_master(pdev);
	ionic_unmap_bars(ionic);
	pci_release_regions(pdev);
+67 −0
Original line number Diff line number Diff line
@@ -92,6 +92,7 @@ int ionic_dev_setup(struct ionic *ionic)
	unsigned int num_bars = ionic->num_bars;
	struct ionic_dev *idev = &ionic->idev;
	struct device *dev = ionic->dev;
	int size;
	u32 sig;

	/* BAR0: dev_cmd and interrupts */
@@ -133,9 +134,36 @@ int ionic_dev_setup(struct ionic *ionic)
	idev->db_pages = bar->vaddr;
	idev->phy_db_pages = bar->bus_addr;

	/* BAR2: optional controller memory mapping */
	bar++;
	mutex_init(&idev->cmb_inuse_lock);
	if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
		idev->cmb_inuse = NULL;
		return 0;
	}

	idev->phy_cmb_pages = bar->bus_addr;
	idev->cmb_npages = bar->len / PAGE_SIZE;
	size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
	idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
	if (!idev->cmb_inuse)
		dev_warn(dev, "No memory for CMB, disabling\n");

	return 0;
}

/* Undo the CMB state set up by ionic_dev_setup(): release the page
 * tracking bitmap and its lock.
 */
void ionic_dev_teardown(struct ionic *ionic)
{
	struct ionic_dev *idev = &ionic->idev;

	kfree(idev->cmb_inuse);
	idev->cmb_inuse = NULL;
	idev->cmb_npages = 0;
	idev->phy_cmb_pages = 0;

	mutex_destroy(&idev->cmb_inuse_lock);
}

/* Devcmd Interface */
bool ionic_is_fw_running(struct ionic_dev *idev)
{
@@ -571,6 +599,33 @@ int ionic_db_page_num(struct ionic_lif *lif, int pid)
	return (lif->hw_index * lif->dbid_count) + pid;
}

/* Reserve a 2^order-page region of controller memory.
 * On success returns 0 and fills in the region's page id and bus address;
 * on failure returns the negative errno from the bitmap search.
 */
int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	int pg;

	/* cmb_inuse_lock serializes allocation against ionic_put_cmb() */
	mutex_lock(&idev->cmb_inuse_lock);
	pg = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
	mutex_unlock(&idev->cmb_inuse_lock);

	if (pg < 0)
		return pg;

	*pgaddr = idev->phy_cmb_pages + pg * PAGE_SIZE;
	*pgid = pg;

	return 0;
}

/* Return a region previously reserved by ionic_get_cmb() to the free pool. */
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&idev->cmb_inuse_lock);
	bitmap_release_region(idev->cmb_inuse, pgid, order);
	mutex_unlock(&idev->cmb_inuse_lock);
}

int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
		  struct ionic_intr_info *intr,
		  unsigned int num_descs, size_t desc_size)
@@ -679,6 +734,18 @@ void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
		cur->desc = base + (i * q->desc_size);
}

/* Record the queue's CMB mapping and point each descriptor info entry
 * at its slot in the controller-memory ring.
 */
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
{
	unsigned int i;

	q->cmb_base = base;
	q->cmb_base_pa = base_pa;

	for (i = 0; i < q->num_descs; i++)
		q->info[i].cmb_desc = base + (i * q->desc_size);
}

void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
	struct ionic_desc_info *cur;
+13 −0
Original line number Diff line number Diff line
@@ -159,6 +159,11 @@ struct ionic_dev {
	struct ionic_intr __iomem *intr_ctrl;
	u64 __iomem *intr_status;

	struct mutex cmb_inuse_lock; /* for cmb_inuse */
	unsigned long *cmb_inuse;
	dma_addr_t phy_cmb_pages;
	u32 cmb_npages;

	u32 port_info_sz;
	struct ionic_port_info *port_info;
	dma_addr_t port_info_pa;
@@ -203,6 +208,7 @@ struct ionic_desc_info {
		struct ionic_rxq_desc *rxq_desc;
		struct ionic_admin_cmd *adminq_desc;
	};
	void __iomem *cmb_desc;
	union {
		void *sg_desc;
		struct ionic_txq_sg_desc *txq_sg_desc;
@@ -241,12 +247,14 @@ struct ionic_queue {
		struct ionic_rxq_desc *rxq;
		struct ionic_admin_cmd *adminq;
	};
	void __iomem *cmb_base;
	union {
		void *sg_base;
		struct ionic_txq_sg_desc *txq_sgl;
		struct ionic_rxq_sg_desc *rxq_sgl;
	};
	dma_addr_t base_pa;
	dma_addr_t cmb_base_pa;
	dma_addr_t sg_base_pa;
	unsigned int desc_size;
	unsigned int sg_desc_size;
@@ -309,6 +317,7 @@ static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)

void ionic_init_devinfo(struct ionic *ionic);
int ionic_dev_setup(struct ionic *ionic);
void ionic_dev_teardown(struct ionic *ionic);

void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
u8 ionic_dev_cmd_status(struct ionic_dev *idev);
@@ -344,6 +353,9 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,

int ionic_db_page_num(struct ionic_lif *lif, int pid);

int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);

int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
		  struct ionic_intr_info *intr,
		  unsigned int num_descs, size_t desc_size);
@@ -360,6 +372,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
		 unsigned int num_descs, size_t desc_size,
		 size_t sg_desc_size, unsigned int pid);
void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
		  void *cb_arg);
+111 −6
Original line number Diff line number Diff line
@@ -511,6 +511,87 @@ static int ionic_set_coalesce(struct net_device *netdev,
	return 0;
}

/* Check a requested CMB (controller memory buffer) ring configuration
 * against device support and available CMB BAR space.
 *
 * Returns the number of CMB pages the configuration needs (>= 0),
 * -EOPNOTSUPP if CMB or the requested tx/rx push mode is not supported,
 * or -ENOMEM if the CMB BAR does not have enough pages.
 */
static int ionic_validate_cmb_config(struct ionic_lif *lif,
				     struct ionic_queue_params *qparam)
{
	int pages_have, pages_required = 0;
	unsigned long sz;

	if (!lif->ionic->idev.cmb_inuse &&
	    (qparam->cmb_tx || qparam->cmb_rx)) {
		netdev_info(lif->netdev, "CMB rings are not supported on this device\n");
		return -EOPNOTSUPP;
	}

	if (qparam->cmb_tx) {
		if (!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_CMB)) {
			netdev_info(lif->netdev,
				    "CMB rings for tx-push are not supported on this device\n");
			return -EOPNOTSUPP;
		}

		/* total tx descriptor bytes across all queues, in whole pages */
		sz = sizeof(struct ionic_txq_desc) * qparam->ntxq_descs * qparam->nxqs;
		pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
	}

	if (qparam->cmb_rx) {
		if (!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_CMB)) {
			netdev_info(lif->netdev,
				    "CMB rings for rx-push are not supported on this device\n");
			return -EOPNOTSUPP;
		}

		/* total rx descriptor bytes across all queues, in whole pages */
		sz = sizeof(struct ionic_rxq_desc) * qparam->nrxq_descs * qparam->nxqs;
		pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
	}

	pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE;
	if (pages_required > pages_have) {
		/* fix: message was missing its terminating newline */
		netdev_info(lif->netdev,
			    "Not enough CMB pages for number of queues and size of descriptor rings, need %d have %d\n",
			    pages_required, pages_have);
		return -ENOMEM;
	}

	return pages_required;
}

/* Validate and record a CMB tx/rx-push selection in the lif state bits.
 * Only permitted while the interface is down, since the rings must be
 * rebuilt to move descriptors between DRAM and controller memory.
 */
static int ionic_cmb_rings_toggle(struct ionic_lif *lif, bool cmb_tx, bool cmb_rx)
{
	struct ionic_queue_params qparam;
	int pages;

	if (netif_running(lif->netdev)) {
		netdev_info(lif->netdev, "Please stop device to toggle CMB for tx/rx-push\n");
		return -EBUSY;
	}

	ionic_init_queue_params(lif, &qparam);
	qparam.cmb_tx = cmb_tx;
	qparam.cmb_rx = cmb_rx;

	pages = ionic_validate_cmb_config(lif, &qparam);
	if (pages < 0)
		return pages;

	if (!cmb_tx)
		clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
	else
		set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);

	if (!cmb_rx)
		clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
	else
		set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);

	if (!cmb_tx && !cmb_rx)
		netdev_info(lif->netdev, "Disabling CMB rings\n");
	else
		netdev_info(lif->netdev, "Enabling CMB %s %s rings - %d pages\n",
			    cmb_tx ? "TX" : "", cmb_rx ? "RX" : "", pages);

	return 0;
}

static void ionic_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
@@ -522,6 +603,8 @@ static void ionic_get_ringparam(struct net_device *netdev,
	ring->tx_pending = lif->ntxq_descs;
	ring->rx_max_pending = IONIC_MAX_RX_DESC;
	ring->rx_pending = lif->nrxq_descs;
	kernel_ring->tx_push = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
	kernel_ring->rx_push = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}

static int ionic_set_ringparam(struct net_device *netdev,
@@ -551,9 +634,28 @@ static int ionic_set_ringparam(struct net_device *netdev,

	/* if nothing to do return success */
	if (ring->tx_pending == lif->ntxq_descs &&
	    ring->rx_pending == lif->nrxq_descs)
	    ring->rx_pending == lif->nrxq_descs &&
	    kernel_ring->tx_push == test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) &&
	    kernel_ring->rx_push == test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
		return 0;

	qparam.ntxq_descs = ring->tx_pending;
	qparam.nrxq_descs = ring->rx_pending;
	qparam.cmb_tx = kernel_ring->tx_push;
	qparam.cmb_rx = kernel_ring->rx_push;

	err = ionic_validate_cmb_config(lif, &qparam);
	if (err < 0)
		return err;

	if (kernel_ring->tx_push != test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) ||
	    kernel_ring->rx_push != test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) {
		err = ionic_cmb_rings_toggle(lif, kernel_ring->tx_push,
					     kernel_ring->rx_push);
		if (err < 0)
			return err;
	}

	if (ring->tx_pending != lif->ntxq_descs)
		netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
			    lif->ntxq_descs, ring->tx_pending);
@@ -569,9 +671,6 @@ static int ionic_set_ringparam(struct net_device *netdev,
		return 0;
	}

	qparam.ntxq_descs = ring->tx_pending;
	qparam.nrxq_descs = ring->rx_pending;

	mutex_lock(&lif->queue_lock);
	err = ionic_reconfigure_queues(lif, &qparam);
	mutex_unlock(&lif->queue_lock);
@@ -638,7 +737,7 @@ static int ionic_set_channels(struct net_device *netdev,
				    lif->nxqs, ch->combined_count);

		qparam.nxqs = ch->combined_count;
		qparam.intr_split = 0;
		qparam.intr_split = false;
	} else {
		max_cnt /= 2;
		if (ch->rx_count > max_cnt)
@@ -654,9 +753,13 @@ static int ionic_set_channels(struct net_device *netdev,
				    lif->nxqs, ch->rx_count);

		qparam.nxqs = ch->rx_count;
		qparam.intr_split = 1;
		qparam.intr_split = true;
	}

	err = ionic_validate_cmb_config(lif, &qparam);
	if (err < 0)
		return err;

	/* if we're not running, just set the values and return */
	if (!netif_running(lif->netdev)) {
		lif->nxqs = qparam.nxqs;
@@ -965,6 +1068,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
	.supported_ring_params = ETHTOOL_RING_USE_TX_PUSH |
				 ETHTOOL_RING_USE_RX_PUSH,
	.get_drvinfo		= ionic_get_drvinfo,
	.get_regs_len		= ionic_get_regs_len,
	.get_regs		= ionic_get_regs,
Loading