Unverified Commit b24b5c65 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4725 [OLK-6.6] merge upstream net-v6.7 all wangxun patches

Merge Pull Request from: @duanqiangwen 
 
Backport all Wangxun patches from the upstream net tree, up to tag v6.7.

issue:
https://gitee.com/openeuler/kernel/issues/I93QRU

Already merged (16):
907ee6681788:"net: fill in MODULE_DESCRIPTION()s for wx_lib"
b3739fb3a9e6:"wangxunx: select CONFIG_PHYLINK where needed"
b746dc6bdde5:"net: wangxun: add ethtool_ops for msglevel"
4ac2d9dff4b0:"net: wangxun: add coalesce options support"
883b5984a5d2:"net: wangxun: add ethtool_ops for ring parameters"
2fe2ca09da95:"net: wangxun: add flow control support"
bc2426d74aa3:"net: ngbe: convert phylib to phylink"
4491c602fe5f:"net: txgbe: use phylink bits added in libwx"
e8e138cf7383:"net: libwx: add phylink to libwx"
48e44287c653:"net: wangxun: remove redundant kernel log"
0a2714d5e2d3:"net: ngbe: add ethtool stats support"
9224ade65390:"net: txgbe: add ethtool stats support"
f55752402945:"net: wangxun: move MDIO bus implementation to the library"
738b54b9b623:"net: libwx: fix memory leak on free page"
46b92e10d631:"net: libwx: support hardware statistics"
87e839c82cc3:"net: wangxun: fix changing mac failed when running"

 
 
Link:https://gitee.com/openeuler/kernel/pulls/4725

 

Reviewed-by: default avatarYue Haibing <yuehaibing@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents 0f99741b 77eefa57
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@ config NGBE
	tristate "Wangxun(R) GbE PCI Express adapters support"
	depends on PCI
	select LIBWX
	select PHYLIB
	select PHYLINK
	help
	  This driver supports Wangxun(R) GbE PCI Express family of
	  adapters.
+348 −0
Original line number Diff line number Diff line
@@ -3,9 +3,172 @@

#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/ethtool.h>

#include "wx_type.h"
#include "wx_ethtool.h"
#include "wx_hw.h"
#include "wx_lib.h"

/* Descriptor for one exported ethtool statistic: user-visible name plus
 * the size and offset of the backing field inside struct wx.
 */
struct wx_stats {
	char stat_string[ETH_GSTRING_LEN];
	size_t sizeof_stat;
	off_t stat_offset;
};

/* Build a wx_stats entry for member @m of struct wx, labelled @str. */
#define WX_STAT(str, m) { \
		.stat_string = str, \
		.sizeof_stat = sizeof(((struct wx *)0)->m), \
		.stat_offset = offsetof(struct wx, m) }

/* Table of device-global statistics exposed via ethtool -S; order here
 * defines both the string order and the value order reported.
 */
static const struct wx_stats wx_gstrings_stats[] = {
	WX_STAT("rx_dma_pkts", stats.gprc),
	WX_STAT("tx_dma_pkts", stats.gptc),
	WX_STAT("rx_dma_bytes", stats.gorc),
	WX_STAT("tx_dma_bytes", stats.gotc),
	WX_STAT("rx_total_pkts", stats.tpr),
	WX_STAT("tx_total_pkts", stats.tpt),
	WX_STAT("rx_long_length_count", stats.roc),
	WX_STAT("rx_short_length_count", stats.ruc),
	WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
	WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
	WX_STAT("rx_no_dma_resources", stats.rdmdrop),
	WX_STAT("tx_busy", tx_busy),
	WX_STAT("non_eop_descs", non_eop_descs),
	WX_STAT("tx_restart_queue", restart_queue),
	WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
	WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};

/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define WX_NUM_RX_QUEUES netdev->num_tx_queues
#define WX_NUM_TX_QUEUES netdev->num_tx_queues

/* Two u64 counters (packets, bytes) for each Tx and each Rx queue. */
#define WX_QUEUE_STATS_LEN ( \
		(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
		(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN  ARRAY_SIZE(wx_gstrings_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)

/**
 * wx_get_sset_count - report how many strings a string set contains
 * @netdev: network interface device structure
 * @sset: string set id
 *
 * Return: total statistics count for ETH_SS_STATS, -EOPNOTSUPP for any
 * other set.
 */
int wx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return WX_STATS_LEN;
}
EXPORT_SYMBOL(wx_get_sset_count);

/**
 * wx_get_strings - fill in the statistic name strings for ethtool -S
 * @netdev: network interface device structure
 * @stringset: string set id (only ETH_SS_STATS is handled)
 * @data: buffer receiving ETH_GSTRING_LEN-sized name strings
 *
 * The order written here must match the value order produced by
 * wx_get_ethtool_stats(): globals first, then per-Tx-queue pairs,
 * then per-Rx-queue pairs.
 */
void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
			ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string);
		for (i = 0; i < netdev->num_tx_queues; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}
		for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}
		break;
	}
}
EXPORT_SYMBOL(wx_get_strings);

/**
 * wx_get_ethtool_stats - collect statistic values for ethtool -S
 * @netdev: network interface device structure
 * @stats: ethtool stats header (unused)
 * @data: output array; ordering must match wx_get_strings()
 *
 * Copies the global counters out of struct wx, then the per-queue
 * packet/byte pairs for every Tx and Rx queue.  Queue slots whose ring
 * pointer is NULL are reported as zero.
 */
void wx_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *data)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_ring *ring;
	unsigned int start;
	int i, j;
	char *p;

	/* refresh hardware counters before reporting them */
	wx_update_stats(wx);

	for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
		/* each entry records its own width; read u64 or u32 accordingly */
		p = (char *)wx + wx_gstrings_stats[i].stat_offset;
		data[i] = (wx_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = wx->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		/* retry loop gives a consistent packets/bytes snapshot */
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < WX_NUM_RX_QUEUES; j++) {
		ring = wx->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
}
EXPORT_SYMBOL(wx_get_ethtool_stats);

void wx_get_mac_stats(struct net_device *netdev,
		      struct ethtool_eth_mac_stats *mac_stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;

	wx_update_stats(wx);

	hwstats = &wx->stats;
	mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
	mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
	mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
	mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
}
EXPORT_SYMBOL(wx_get_mac_stats);

/**
 * wx_get_pause_stats - report 802.3x pause frame counters
 * @netdev: network interface device structure
 * @stats: ethtool pause statistics structure to fill in
 */
void wx_get_pause_stats(struct net_device *netdev,
			struct ethtool_pause_stats *stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;

	wx_update_stats(wx);

	hwstats = &wx->stats;
	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
	/* NOTE(review): lxonoffrxc appears to be a single hardware counter
	 * covering both XON and XOFF frames received — confirm against the
	 * register description.
	 */
	stats->rx_pause_frames = hwstats->lxonoffrxc;
}
EXPORT_SYMBOL(wx_get_pause_stats);

void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
@@ -14,5 +177,190 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
	if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
		info->n_stats = WX_STATS_LEN -
				   (WX_NUM_TX_QUEUES - wx->num_tx_queues) *
				   (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
	} else {
		info->n_stats = WX_STATS_LEN;
	}
}
EXPORT_SYMBOL(wx_get_drvinfo);

/**
 * wx_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 *
 * Return: 0 on success or a negative errno from phylink.
 */
int wx_nway_reset(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);

/**
 * wx_get_link_ksettings - get link settings (delegates to phylink)
 * @netdev: network interface device structure
 * @cmd: link settings structure to fill in
 *
 * Return: 0 on success or a negative errno from phylink.
 */
int wx_get_link_ksettings(struct net_device *netdev,
			  struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);

/**
 * wx_set_link_ksettings - set link settings (delegates to phylink)
 * @netdev: network interface device structure
 * @cmd: requested link settings
 *
 * Return: 0 on success or a negative errno from phylink.
 */
int wx_set_link_ksettings(struct net_device *netdev,
			  const struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);

/**
 * wx_get_pauseparam - get flow control parameters (delegates to phylink)
 * @netdev: network interface device structure
 * @pause: pause parameter structure to fill in
 */
void wx_get_pauseparam(struct net_device *netdev,
		       struct ethtool_pauseparam *pause)
{
	struct wx *wx = netdev_priv(netdev);

	phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);

/**
 * wx_set_pauseparam - set flow control parameters (delegates to phylink)
 * @netdev: network interface device structure
 * @pause: requested pause parameters
 *
 * Return: 0 on success or a negative errno from phylink.
 */
int wx_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);

/**
 * wx_get_ringparam - report ring descriptor counts
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill in
 * @kernel_ring: kernel ringparam extension (unused)
 * @extack: netlink extended ack (unused)
 *
 * Mini and jumbo rings are not supported by this hardware, so their
 * counts are reported as zero.
 */
void wx_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *ring,
		      struct kernel_ethtool_ringparam *kernel_ring,
		      struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ring->rx_pending = wx->rx_ring_count;
	ring->rx_max_pending = WX_MAX_RXD;
	ring->tx_pending = wx->tx_ring_count;
	ring->tx_max_pending = WX_MAX_TXD;
	ring->rx_mini_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->rx_jumbo_max_pending = 0;
}
EXPORT_SYMBOL(wx_get_ringparam);

/**
 * wx_get_coalesce - report interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: coalescing settings structure to fill in
 * @kernel_coal: kernel coalesce extension (unused)
 * @extack: netlink extended ack (unused)
 *
 * ITR settings are stored as (usecs << 2); values <= 1 are reported
 * verbatim.  When a vector services both Tx and Rx, only the Rx value
 * is meaningful and Tx reporting is skipped.
 *
 * Return: 0 always.
 */
int wx_get_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
	/* only valid if in constant ITR mode */
	if (wx->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = wx->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (wx->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = wx->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;

	return 0;
}
EXPORT_SYMBOL(wx_get_coalesce);

/**
 * wx_set_coalesce - apply interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: requested coalescing settings
 * @kernel_coal: kernel coalesce extension (unused)
 * @extack: netlink extended ack (unused)
 *
 * Converts the requested microsecond values into ITR register units
 * (usecs << 2), validates them against the per-MAC EITR maximum, and
 * programs every queue vector.
 *
 * Return: 0 on success, -EOPNOTSUPP when a Tx-specific change is made
 * while vectors are shared between Rx and Tx, -EINVAL when a value
 * exceeds the hardware maximum.
 */
int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u16 tx_itr_param, rx_itr_param;
	struct wx_q_vector *q_vector;
	u16 max_eitr;
	int i;

	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EOPNOTSUPP;
	}

	if (ec->tx_max_coalesced_frames_irq)
		wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	/* the EITR ceiling differs between SP and EM MACs */
	if (wx->mac.type == wx_mac_sp)
		max_eitr = WX_SP_MAX_EITR;
	else
		max_eitr = WX_EM_MAX_EITR;

	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
		return -EINVAL;

	/* store as register units (usecs << 2); 0 and 1 are kept as-is */
	if (ec->rx_coalesce_usecs > 1)
		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		wx->rx_itr_setting = ec->rx_coalesce_usecs;

	/* a setting of 1 selects the default 20K interrupt rate */
	if (wx->rx_itr_setting == 1)
		rx_itr_param = WX_20K_ITR;
	else
		rx_itr_param = wx->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		wx->tx_itr_setting = ec->tx_coalesce_usecs;

	if (wx->tx_itr_setting == 1) {
		if (wx->mac.type == wx_mac_sp)
			tx_itr_param = WX_12K_ITR;
		else
			tx_itr_param = WX_20K_ITR;
	} else {
		tx_itr_param = wx->tx_itr_setting;
	}

	/* mixed Rx/Tx */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		wx->tx_itr_setting = wx->rx_itr_setting;

	for (i = 0; i < wx->num_q_vectors; i++) {
		q_vector = wx->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		wx_write_eitr(q_vector);
	}

	return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);

/**
 * wx_get_msglevel - return the driver's message level bitmask
 * @netdev: network interface device structure
 *
 * Return: the current NETIF_MSG_* bitmask.
 */
u32 wx_get_msglevel(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	return wx->msg_enable;
}
EXPORT_SYMBOL(wx_get_msglevel);

/**
 * wx_set_msglevel - set the driver's message level bitmask
 * @netdev: network interface device structure
 * @data: new NETIF_MSG_* bitmask
 */
void wx_set_msglevel(struct net_device *netdev, u32 data)
{
	struct wx *wx = netdev_priv(netdev);

	wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);
+31 −0
Original line number Diff line number Diff line
@@ -4,5 +4,36 @@
#ifndef _WX_ETHTOOL_H_
#define _WX_ETHTOOL_H_

/* Shared ethtool_ops implementations provided by libwx for the
 * Wangxun ngbe/txgbe drivers.
 */

/* statistics */
int wx_get_sset_count(struct net_device *netdev, int sset);
void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data);
void wx_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *data);
void wx_get_mac_stats(struct net_device *netdev,
		      struct ethtool_eth_mac_stats *mac_stats);
void wx_get_pause_stats(struct net_device *netdev,
			struct ethtool_pause_stats *stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
/* link management (phylink-backed) */
int wx_nway_reset(struct net_device *netdev);
int wx_get_link_ksettings(struct net_device *netdev,
			  struct ethtool_link_ksettings *cmd);
int wx_set_link_ksettings(struct net_device *netdev,
			  const struct ethtool_link_ksettings *cmd);
void wx_get_pauseparam(struct net_device *netdev,
		       struct ethtool_pauseparam *pause);
int wx_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause);
/* ring and interrupt tuning */
void wx_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *ring,
		      struct kernel_ethtool_ringparam *kernel_ring,
		      struct netlink_ext_ack *extack);
int wx_get_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack);
int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack);
/* message level */
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
#endif /* _WX_ETHTOOL_H_ */
+363 −0
Original line number Diff line number Diff line
@@ -12,6 +12,98 @@
#include "wx_lib.h"
#include "wx_hw.h"

/* Perform one MDIO read through the MAC's MDIO controller.  Shared by
 * the Clause 22 and Clause 45 entry points; the caller has already
 * programmed WX_MDIO_CLAUSE_SELECT.  @devnum is 0 for Clause 22.
 *
 * Returns the 16-bit register value, or a negative errno when the
 * controller does not complete the command within the poll timeout.
 */
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
{
	struct wx *wx = bus->priv;
	u32 command, val;
	int ret;

	/* setup and write the address cycle command */
	command = WX_MSCA_RA(regnum) |
		  WX_MSCA_PA(phy_addr) |
		  WX_MSCA_DA(devnum);
	wr32(wx, WX_MSCA, command);

	/* EM MACs additionally need the MDIO clock divisor */
	command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
	if (wx->mac.type == wx_mac_em)
		command |= WX_MDIO_CLK(6);
	wr32(wx, WX_MSCC, command);

	/* wait to complete */
	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
				100000, false, wx, WX_MSCC);
	if (ret) {
		/* NOTE(review): this helper also serves the C45 path, so the
		 * "c22" in the message below is misleading for C45 reads.
		 */
		wx_err(wx, "Mdio read c22 command did not complete.\n");
		return ret;
	}

	return (u16)rd32(wx, WX_MSCC);
}

/* Perform one MDIO write through the MAC's MDIO controller.  Shared by
 * the Clause 22 and Clause 45 entry points; the caller has already
 * programmed WX_MDIO_CLAUSE_SELECT.  @devnum is 0 for Clause 22.
 *
 * Returns 0 on success or a negative errno when the controller does not
 * complete the command within the poll timeout.
 */
static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr,
				int devnum, int regnum, u16 value)
{
	struct wx *wx = bus->priv;
	u32 command, val;
	int ret;

	/* setup and write the address cycle command */
	command = WX_MSCA_RA(regnum) |
		  WX_MSCA_PA(phy_addr) |
		  WX_MSCA_DA(devnum);
	wr32(wx, WX_MSCA, command);

	/* EM MACs additionally need the MDIO clock divisor */
	command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
	if (wx->mac.type == wx_mac_em)
		command |= WX_MDIO_CLK(6);
	wr32(wx, WX_MSCC, command);

	/* wait to complete */
	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
				100000, false, wx, WX_MSCC);
	if (ret)
		/* NOTE(review): also used on the C45 path; "c22" here is
		 * misleading for C45 writes.
		 */
		wx_err(wx, "Mdio write c22 command did not complete.\n");

	return ret;
}

/* Clause 22 MDIO read: select C22 framing (0xF) then issue the read. */
int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
	return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum);
}
EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22);

/* Clause 22 MDIO write: select C22 framing (0xF) then issue the write. */
int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
	return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value);
}
EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22);

/* Clause 45 MDIO read: select C45 framing (0) then issue the read. */
int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
	return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum);
}
EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45);

/* Clause 45 MDIO write: select C45 framing (0) then issue the write. */
int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
			     int devnum, int regnum, u16 value)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
	return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value);
}
EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45);

static void wx_intr_disable(struct wx *wx, u64 qmask)
{
	u32 mask;
@@ -1066,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx)
	wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}

/* framing overhead (preamble, SFD, IFG) added on top of the MTU */
#define WX_ETH_FRAMING 20

/**
 * wx_hpbthresh - calculate high water mark for flow control
 *
 * @wx: board private structure to calculate for
 *
 * Return: high water mark in KB units; falls back to the frame size + 1
 * when the packet buffer cannot provide the required headroom.
 **/
static int wx_hpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING;
	tc = link;

	/* Calculate delay value for device */
	dv_id = WX_DV(link, tc);

	/* Delay value is calculated in bit times convert to KB */
	kb = WX_BT2KB(dv_id);
	rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case throw an error
	 * to user and do the best we can.
	 */
	if (marker < 0) {
		dev_warn(&wx->pdev->dev,
			 "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n");
		marker = tc + 1;
	}

	return marker;
}

/**
 * wx_lpbthresh - calculate low water mark for flow control
 *
 * @wx: board private structure to calculate for
 *
 * Return: low water mark in KB units, derived from the max frame size.
 **/
static int wx_lpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	u32 dv_id;
	int tc;

	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Calculate delay value for device */
	dv_id = WX_LOW_DV(tc);

	/* Delay value is calculated in bit times convert to KB */
	return WX_BT2KB(dv_id);
}

/**
 * wx_pbthresh_setup - calculate and setup high low water marks
 *
 * @wx: board private structure to calculate for
 **/
static void wx_pbthresh_setup(struct wx *wx)
{
	wx->fc.high_water = wx_hpbthresh(wx);
	wx->fc.low_water = wx_lpbthresh(wx);

	/* Low water marks must not be larger than high water marks */
	if (wx->fc.low_water > wx->fc.high_water)
		wx->fc.low_water = 0;
}

static void wx_configure_port(struct wx *wx)
{
	u32 value, i;
@@ -1492,6 +1659,7 @@ static void wx_configure_isb(struct wx *wx)
void wx_configure(struct wx *wx)
{
	wx_set_rxpba(wx);
	wx_pbthresh_setup(wx);
	wx_configure_port(wx);

	wx_set_rx_mode(wx->netdev);
@@ -1911,6 +2079,201 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
}
EXPORT_SYMBOL(wx_vlan_rx_kill_vid);

/* Set the drop-enable bit on one Rx ring so the hardware drops packets
 * instead of backpressuring when the ring is full.
 */
static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
{
	u32 cfg = rd32(wx, WX_PX_RR_CFG(ring->reg_idx));

	wr32(wx, WX_PX_RR_CFG(ring->reg_idx), cfg | WX_PX_RR_CFG_DROP_EN);
}

/* Clear the drop-enable bit on one Rx ring. */
static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
{
	u32 cfg = rd32(wx, WX_PX_RR_CFG(ring->reg_idx));

	wr32(wx, WX_PX_RR_CFG(ring->reg_idx), cfg & ~WX_PX_RR_CFG_DROP_EN);
}

/**
 * wx_fc_enable - program 802.3x flow control into the hardware
 * @wx: board private structure
 * @tx_pause: true to transmit pause frames
 * @rx_pause: true to honour received pause frames
 *
 * Programs the Rx/Tx pause enables, the high/low water marks, the pause
 * time and refresh threshold, and toggles per-ring Rx drop depending on
 * whether Tx pause is active.
 *
 * Return: 0 on success, -EINVAL on an invalid water mark configuration.
 */
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
{
	u16 pause_time = WX_DEFAULT_FCPAUSE;
	u32 mflcn_reg, fccfg_reg, reg;
	u32 fcrtl, fcrth;
	int i;

	/* Low water mark of zero causes XOFF floods */
	if (tx_pause && wx->fc.high_water) {
		if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
			wx_err(wx, "Invalid water mark configuration\n");
			return -EINVAL;
		}
	}

	/* Disable any previous flow control settings */
	mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
	mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE;

	fccfg_reg = rd32(wx, WX_RDB_RFCC);
	fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X;

	if (rx_pause)
		mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE;
	if (tx_pause)
		fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X;

	/* Set 802.3x based flow control settings. */
	wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
	wr32(wx, WX_RDB_RFCC, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if (tx_pause && wx->fc.high_water) {
		/* water marks are stored in KB; registers take them << 10 */
		fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
		wr32(wx, WX_RDB_RFCL, fcrtl);
		fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
	} else {
		wr32(wx, WX_RDB_RFCL, 0);
		/* In order to prevent Tx hangs when the internal Tx
		 * switch is enabled we must set the high water mark
		 * to the Rx packet buffer size - 24KB.  This allows
		 * the Tx switch to function even under heavy Rx
		 * workloads.
		 */
		fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
	}

	wr32(wx, WX_RDB_RFCH, fcrth);

	/* Configure pause time */
	reg = pause_time * 0x00010001;
	wr32(wx, WX_RDB_RFCV, reg);

	/* Configure flow control refresh threshold value */
	wr32(wx, WX_RDB_RFCRT, pause_time / 2);

	/*  We should set the drop enable bit if:
	 *  Number of Rx queues > 1 and flow control is disabled
	 *
	 *  This allows us to avoid head of line blocking for security
	 *  and performance reasons.
	 */
	if (wx->num_rx_queues > 1 && !tx_pause) {
		for (i = 0; i < wx->num_rx_queues; i++)
			wx_enable_rx_drop(wx, wx->rx_ring[i]);
	} else {
		for (i = 0; i < wx->num_rx_queues; i++)
			wx_disable_rx_drop(wx, wx->rx_ring[i]);
	}

	return 0;
}
EXPORT_SYMBOL(wx_fc_enable);

/**
 * wx_update_stats - Update the board statistics counters.
 * @wx: board private structure
 *
 * Aggregates per-ring software counters into struct wx and accumulates
 * the hardware counters (clear-on-read, per wx_clear_hw_cntrs()) into
 * wx->stats.
 **/
void wx_update_stats(struct wx *wx)
{
	struct wx_hw_stats *hwstats = &wx->stats;

	u64 non_eop_descs = 0, alloc_rx_buff_failed = 0;
	u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0;
	u64 restart_queue = 0, tx_busy = 0;
	u32 i;

	/* gather some stats to the wx struct that are per queue */
	/* NOTE(review): rings are dereferenced without a NULL check here,
	 * unlike wx_get_ethtool_stats() — presumably all rings within
	 * num_rx/tx_queues are allocated; confirm against ring setup.
	 */
	for (i = 0; i < wx->num_rx_queues; i++) {
		struct wx_ring *rx_ring = wx->rx_ring[i];

		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt;
		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
	}
	wx->non_eop_descs = non_eop_descs;
	wx->alloc_rx_buff_failed = alloc_rx_buff_failed;
	wx->hw_csum_rx_error = hw_csum_rx_error;
	wx->hw_csum_rx_good = hw_csum_rx_good;

	for (i = 0; i < wx->num_tx_queues; i++) {
		struct wx_ring *tx_ring = wx->tx_ring[i];

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
	}
	wx->restart_queue = restart_queue;
	wx->tx_busy = tx_busy;

	/* accumulate the clear-on-read hardware counters */
	hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT);
	hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT);
	hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB);
	hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB);
	hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
	hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
	hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
	hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
	hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
	hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
	hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
	hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
	hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
	hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
	hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC);
	hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC);
	hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC);
	hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT);
	hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT);
	hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT);
	hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
	hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);

	for (i = 0; i < wx->mac.max_rx_queues; i++)
		hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
}
EXPORT_SYMBOL(wx_update_stats);

/**
 *  wx_clear_hw_cntrs - Generic clear hardware counters
 *  @wx: board private structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.
 **/
void wx_clear_hw_cntrs(struct wx *wx)
{
	u16 i = 0;

	for (i = 0; i < wx->mac.max_rx_queues; i++)
		wr32(wx, WX_PX_MPRC(i), 0);

	rd32(wx, WX_RDM_PKT_CNT);
	rd32(wx, WX_TDM_PKT_CNT);
	rd64(wx, WX_RDM_BYTE_CNT_LSB);
	/* NOTE(review): wx_update_stats() reads WX_TDM_BYTE_CNT_LSB with
	 * rd64, but it is cleared here with rd32 — verify whether the high
	 * word also needs a read to be cleared.
	 */
	rd32(wx, WX_TDM_BYTE_CNT_LSB);
	rd32(wx, WX_RDM_DRP_PKT);
	rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
	rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
	rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
	rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
	rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
	rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
	rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
	rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
	rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
	rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
	rd32(wx, WX_RDB_LXONTXC);
	rd32(wx, WX_RDB_LXOFFTXC);
	rd32(wx, WX_MAC_LXONOFFRXC);
}
EXPORT_SYMBOL(wx_clear_hw_cntrs);

/**
 *  wx_start_hw - Prepare hardware for Tx/Rx
 *  @wx: pointer to hardware structure
+10 −0
Original line number Diff line number Diff line
@@ -4,6 +4,13 @@
#ifndef _WX_HW_H_
#define _WX_HW_H_

#include <linux/phy.h>

/* MDIO bus accessors (Clause 22 and Clause 45) shared by ngbe/txgbe */
int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum);
int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value);
int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum);
int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
			     int devnum, int regnum, u16 value);
void wx_intr_enable(struct wx *wx, u64 qmask);
void wx_irq_disable(struct wx *wx);
int wx_check_flash_load(struct wx *wx, u32 check_bit);
@@ -34,5 +41,8 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
/* flow control and hardware statistics */
int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause);
void wx_update_stats(struct wx *wx);
void wx_clear_hw_cntrs(struct wx *wx);
#endif /* _WX_HW_H_ */
Loading