Unverified Commit 7c48e108 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!11246 [olk 5.10] hns3 driver: assorted bugfix patches

Merge Pull Request from: @chen-hao418 
 
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IAN3KC
CVE: NA

 
 
Link: https://gitee.com/openeuler/kernel/pulls/11246

 

Signed-off-by: default avatarYang Yingliang <yangyingliang@huawei.com>
parents 219f14d2 1c6df3ad
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -917,6 +917,7 @@ struct hnae3_tc_info {
	u8 max_tc; /* Total number of TCs */
	u8 num_tc; /* Total number of enabled TCs */
	bool mqprio_active;
	bool mqprio_destroy;
	bool dcb_ets_active;
	u64 max_rate[HNAE3_MAX_TC];     /* Unit Bps */
};
+3 −1
Original line number Diff line number Diff line
@@ -1300,8 +1300,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,

		/* save the buffer addr until the last read operation */
		*save_buf = read_buf;
	}

	/* get data ready for the first time to read */
	if (!*ppos) {
		ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
					read_buf, hns3_dbg_cmd[index].buf_len);
		if (ret)
+61 −1
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
@@ -406,6 +407,24 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
#define HNS3_INVALID_PTYPE \
		ARRAY_SIZE(hns3_rx_ptype_tbl)

static void hns3_dma_map_sync(struct device *dev, unsigned long iova)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_iotlb_gather iotlb_gather;
	size_t granule;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return;

	granule = 1 << __ffs(domain->pgsize_bitmap);
	iova = ALIGN_DOWN(iova, granule);
	iotlb_gather.start = iova;
	iotlb_gather.end = iova + granule - 1;
	iotlb_gather.pgsize = granule;

	iommu_iotlb_sync(domain, &iotlb_gather);
}

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;
@@ -1058,6 +1077,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_tx_spare *tx_spare;
	struct page *page;
	dma_addr_t dma;
@@ -1099,6 +1120,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
	tx_spare->buf = page_address(page);
	tx_spare->len = PAGE_SIZE << order;
	ring->tx_spare = tx_spare;
	ring->tx_copybreak = priv->tx_copybreak;
	return;

dma_mapping_error:
@@ -1971,7 +1993,9 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
				  unsigned int type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hnae3_handle *handle = ring->tqp->handle;
	struct device *dev = ring_to_dev(ring);
	struct hnae3_ae_dev *ae_dev;
	unsigned int size;
	dma_addr_t dma;

@@ -2003,6 +2027,13 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
		return -ENOMEM;
	}

	/* Add a SYNC command to sync io-pgtable to avoid errors in pgtable
	 * prefetch
	 */
	ae_dev = hns3_get_ae_dev(handle);
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hns3_dma_map_sync(dev, dma);

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
@@ -2727,7 +2758,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
			return ret;
	}

	netdev->features = features;
	return 0;
}

@@ -5248,6 +5278,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
	devm_kfree(&pdev->dev, priv->tqp_vector);
}

/* Raise the TX copybreak / TX spare-buffer floors on V3+ hardware that
 * sits behind a DMA-type IOMMU domain, then bump the current settings up
 * to those floors if they fall short.  Leaves everything untouched on
 * older hardware or without a DMA IOMMU domain.
 */
static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
{
#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
#define HNS3_MAX_PACKET_SIZE (64 * 1024)

	struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
	struct hnae3_handle *handle = priv->ae_handle;

	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return;

	/* Floors applied only in the IOMMU DMA-domain case. */
	priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
	priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;

	/* Clamp the active values up to the new minimums. */
	if (priv->tx_copybreak < priv->min_tx_copybreak)
		priv->tx_copybreak = priv->min_tx_copybreak;
	if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
		handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
}

static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			      unsigned int ring_type)
{
@@ -5487,6 +5541,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
	int i, j;
	int ret;

	hns3_update_tx_spare_buf_config(priv);
	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(&priv->ring[i]);
		if (ret) {
@@ -5714,6 +5769,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
	priv->min_tx_copybreak = 0;
	priv->min_tx_spare_buf_size = 0;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
	eth_zero_addr(priv->roh_perm_mac);

@@ -6172,6 +6229,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		hns3_nic_net_stop(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
+2 −0
Original line number Diff line number Diff line
@@ -620,6 +620,8 @@ struct hns3_nic_priv {
	struct hns3_enet_coalesce rx_coal;
	u32 tx_copybreak;
	u32 rx_copybreak;
	u32 min_tx_copybreak;
	u32 min_tx_spare_buf_size;
	u8 roh_perm_mac[ETH_ALEN];
};

+33 −0
Original line number Diff line number Diff line
@@ -1994,6 +1994,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
	return ret;
}

/* Validate a requested TX copybreak against the device's minimum.
 * Returns 0 when acceptable, -EINVAL (with a log message) otherwise.
 */
static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (copybreak >= priv->min_tx_copybreak)
		return 0;

	netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
		   copybreak, priv->min_tx_copybreak);
	return -EINVAL;
}

/* Validate a requested TX spare buffer size against the device's minimum.
 * Returns 0 when acceptable, -EINVAL (with a log message) otherwise.
 */
static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (buf_size >= priv->min_tx_spare_buf_size)
		return 0;

	netdev_err(netdev,
		   "tx spare buf size %u should be no less than %u!\n",
		   buf_size, priv->min_tx_spare_buf_size);
	return -EINVAL;
}

static int hns3_set_tunable(struct net_device *netdev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
@@ -2010,6 +2035,10 @@ static int hns3_set_tunable(struct net_device *netdev,

	switch (tuna->id) {
	case ETHTOOL_TX_COPYBREAK:
		ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
		if (ret)
			return ret;

		priv->tx_copybreak = *(u32 *)data;

		for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -2024,6 +2053,10 @@ static int hns3_set_tunable(struct net_device *netdev,

		break;
	case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
		ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
		if (ret)
			return ret;

		old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
		new_tx_spare_buf_size = *(u32 *)data;
		netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
Loading