Commit 6c09876c authored by Peiyang Wang's avatar Peiyang Wang Committed by Hao Chen
Browse files

net: hns3: default enable tx bounce buffer when smmu enabled

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IAN3KC


CVE: NA

----------------------------------------------------------------------

When the TX bounce buffer is enabled, DMA mapping is performed only once,
when the buffer is initialized; when sending packets, the driver only does
a DMA sync. To avoid SMMU prefetch issues, enable the TX bounce buffer by
default when SMMU is enabled.

Fixes: 3dd7206f ("net: hns3: use tx bounce buffer for small packets")
Signed-off-by: default avatarJian Shen <shenjian15@huawei.com>
Signed-off-by: default avatarPeiyang Wang <wangpeiyang1@huawei.com>
parent 952242db
Loading
Loading
Loading
Loading
+31 −0
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
@@ -1054,6 +1055,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
	u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
	struct net_device *netdev = ring_to_netdev(ring);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_tx_spare *tx_spare;
	struct page *page;
	dma_addr_t dma;
@@ -1095,6 +1098,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
	tx_spare->buf = page_address(page);
	tx_spare->len = PAGE_SIZE << order;
	ring->tx_spare = tx_spare;
	ring->tx_copybreak = priv->tx_copybreak;
	return;

dma_mapping_error:
@@ -5244,6 +5248,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
	devm_kfree(&pdev->dev, priv->tqp_vector);
}

/* Raise the minimum tx copybreak / tx spare buffer size when the device
 * sits behind an SMMU in DMA translation mode, so that TX traffic goes
 * through the pre-mapped bounce buffer and avoids SMMU prefetch issues.
 * Only applies to HW revision V3 and later.
 */
static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
{
#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
#define HNS3_MAX_PACKET_SIZE (64 * 1024)

	struct iommu_domain *dom = iommu_get_domain_for_dev(priv->dev);
	struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
	struct hnae3_handle *h = priv->ae_handle;

	/* Older hardware revisions keep the default configuration. */
	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return;

	/* Only relevant when an IOMMU DMA domain is actually in use. */
	if (!dom || dom->type != IOMMU_DOMAIN_DMA)
		return;

	priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
	priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;

	/* Clamp the current settings up to the new minimums. */
	if (priv->tx_copybreak < priv->min_tx_copybreak)
		priv->tx_copybreak = priv->min_tx_copybreak;
	if (h->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
		h->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
}

static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			      unsigned int ring_type)
{
@@ -5483,6 +5511,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
	int i, j;
	int ret;

	hns3_update_tx_spare_buf_config(priv);
	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(&priv->ring[i]);
		if (ret) {
@@ -5710,6 +5739,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
	priv->min_tx_copybreak = 0;
	priv->min_tx_spare_buf_size = 0;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
	eth_zero_addr(priv->roh_perm_mac);

+2 −0
Original line number Diff line number Diff line
@@ -620,6 +620,8 @@ struct hns3_nic_priv {
	struct hns3_enet_coalesce rx_coal;
	u32 tx_copybreak;
	u32 rx_copybreak;
	u32 min_tx_copybreak;
	u32 min_tx_spare_buf_size;
	u8 roh_perm_mac[ETH_ALEN];
};

+33 −0
Original line number Diff line number Diff line
@@ -1992,6 +1992,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
	return ret;
}

/* Validate a user-requested tx copybreak value against the driver-imposed
 * minimum. Returns 0 on success or -EINVAL (with an error log) otherwise.
 */
static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (copybreak >= priv->min_tx_copybreak)
		return 0;

	netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
		   copybreak, priv->min_tx_copybreak);
	return -EINVAL;
}

/* Validate a user-requested tx spare buffer size against the driver-imposed
 * minimum. Returns 0 on success or -EINVAL (with an error log) otherwise.
 */
static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if (buf_size >= priv->min_tx_spare_buf_size)
		return 0;

	netdev_err(netdev,
		   "tx spare buf size %u should be no less than %u!\n",
		   buf_size, priv->min_tx_spare_buf_size);
	return -EINVAL;
}

static int hns3_set_tunable(struct net_device *netdev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
@@ -2008,6 +2033,10 @@ static int hns3_set_tunable(struct net_device *netdev,

	switch (tuna->id) {
	case ETHTOOL_TX_COPYBREAK:
		ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
		if (ret)
			return ret;

		priv->tx_copybreak = *(u32 *)data;

		for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -2022,6 +2051,10 @@ static int hns3_set_tunable(struct net_device *netdev,

		break;
	case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
		ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
		if (ret)
			return ret;

		old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
		new_tx_spare_buf_size = *(u32 *)data;
		netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",