Commit 7c26c20d authored by Lorenzo Bianconi, committed by David S. Miller

net: ethernet: mtk_eth_soc: add basic XDP support



Introduce basic XDP support to the mtk_eth_soc driver.
Supported XDP verdicts:
- XDP_PASS
- XDP_DROP
- XDP_REDIRECT

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 23233e57
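
For context, the sketch below is a minimal XDP program exercising the three verdicts the driver now handles. It is illustrative only and not part of the commit; the tx_port devmap and the xdp_demo program name are assumptions of the example.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative only, not part of this commit. Exercises the verdicts
 * mtk_xdp_run() handles: XDP_DROP, XDP_REDIRECT (via a devmap) and
 * XDP_PASS. The "tx_port" map name is an assumption of this sketch.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_demo(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames too short to carry an Ethernet header. */
	if (data + 14 > data_end)
		return XDP_DROP;

	/* Redirect to the ifindex stored in slot 0 of tx_port; the
	 * flags argument makes XDP_PASS the fallback if the lookup
	 * fails, so traffic still reaches the stack.
	 */
	return bpf_redirect_map(&tx_port, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";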
drivers/net/ethernet/mediatek/mtk_eth_soc.c  +143 −19
@@ -1432,6 +1432,11 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
 	}
 }
 
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+	return !eth->hwlro;
+}
+
 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
 					      struct xdp_rxq_info *xdp_q,
 					      int id, int size)
@@ -1494,11 +1499,52 @@ static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
 		skb_free_frag(data);
 }
 
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+		       struct xdp_buff *xdp, struct net_device *dev)
+{
+	struct bpf_prog *prog;
+	u32 act = XDP_PASS;
+
+	rcu_read_lock();
+
+	prog = rcu_dereference(eth->prog);
+	if (!prog)
+		goto out;
+
+	act = bpf_prog_run_xdp(prog, xdp);
+	switch (act) {
+	case XDP_PASS:
+		goto out;
+	case XDP_REDIRECT:
+		if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+			act = XDP_DROP;
+			break;
+		}
+		goto out;
+	default:
+		bpf_warn_invalid_xdp_action(dev, prog, act);
+		fallthrough;
+	case XDP_ABORTED:
+		trace_xdp_exception(dev, prog, act);
+		fallthrough;
+	case XDP_DROP:
+		break;
+	}
+
+	page_pool_put_full_page(ring->page_pool,
+				virt_to_head_page(xdp->data), true);
+out:
+	rcu_read_unlock();
+
+	return act;
+}
+
 static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		       struct mtk_eth *eth)
 {
 	struct dim_sample dim_sample = {};
 	struct mtk_rx_ring *ring;
+	bool xdp_flush = false;
 	int idx;
 	struct sk_buff *skb;
 	u8 *data, *new_data;
@@ -1507,9 +1553,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,

 	while (done < budget) {
 		unsigned int pktlen, *rxdcsum;
-		u32 hash, reason, reserve_len;
 		struct net_device *netdev;
 		dma_addr_t dma_addr;
+		u32 hash, reason;
 		int mac = 0;
 
 		ring = mtk_get_rx_ring(eth);
@@ -1539,8 +1585,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
 			goto release_desc;
 
+		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
 		/* alloc new buffer */
 		if (ring->page_pool) {
+			struct page *page = virt_to_head_page(data);
+			struct xdp_buff xdp;
+			u32 ret;
+
 			new_data = mtk_page_pool_get_buff(ring->page_pool,
 							  &dma_addr,
 							  GFP_ATOMIC);
@@ -1548,6 +1600,34 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 				netdev->stats.rx_dropped++;
 				goto release_desc;
 			}
+
+			dma_sync_single_for_cpu(eth->dma_dev,
+				page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+				pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+			xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+			xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+					 false);
+			xdp_buff_clear_frags_flag(&xdp);
+
+			ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+			if (ret == XDP_REDIRECT)
+				xdp_flush = true;
+
+			if (ret != XDP_PASS)
+				goto skip_rx;
+
+			skb = build_skb(data, PAGE_SIZE);
+			if (unlikely(!skb)) {
+				page_pool_put_full_page(ring->page_pool,
+							page, true);
+				netdev->stats.rx_dropped++;
+				goto skip_rx;
+			}
+
+			skb_reserve(skb, xdp.data - xdp.data_hard_start);
+			skb_put(skb, xdp.data_end - xdp.data);
+			skb_mark_for_recycle(skb);
 		} else {
 			if (ring->frag_size <= PAGE_SIZE)
 				new_data = napi_alloc_frag(ring->frag_size);
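
A note on the hunk above: the skb is rebuilt from xdp.data/xdp.data_end after mtk_xdp_run() because an attached program may legally move the packet start into the MTK_PP_HEADROOM reserved by xdp_prepare_buff(). A hedged sketch of such a program, illustrative only and not part of the commit:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative only: a program may grow the frame into the headroom
 * the driver reserved, moving xdp->data. This is why the driver above
 * derives the skb geometry from xdp.data/xdp.data_end after
 * mtk_xdp_run() instead of the original descriptor offsets.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_grow_head_sketch(struct xdp_md *ctx)
{
	/* A negative delta expands the frame at the front by 4 bytes,
	 * consuming headroom; the call fails if none is available.
	 */
	if (bpf_xdp_adjust_head(ctx, -4))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";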
@@ -1571,27 +1651,20 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,

 			dma_unmap_single(eth->dma_dev, trxd.rxd1,
 					 ring->buf_size, DMA_FROM_DEVICE);
-		}
 
-		/* receive data */
-		skb = build_skb(data, ring->frag_size);
-		if (unlikely(!skb)) {
-			mtk_rx_put_buff(ring, data, true);
-			netdev->stats.rx_dropped++;
-			goto skip_rx;
-		}
+			skb = build_skb(data, ring->frag_size);
+			if (unlikely(!skb)) {
+				netdev->stats.rx_dropped++;
+				skb_free_frag(data);
+				goto skip_rx;
+			}
 
-		if (ring->page_pool) {
-			reserve_len = MTK_PP_HEADROOM;
-			skb_mark_for_recycle(skb);
-		} else {
-			reserve_len = NET_SKB_PAD + NET_IP_ALIGN;
+			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+			skb_put(skb, pktlen);
 		}
-		skb_reserve(skb, reserve_len);
 
-		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
-		skb_put(skb, pktlen);
+		bytes += skb->len;
 
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
 			rxdcsum = &trxd.rxd3;
@@ -1603,7 +1676,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		else
 			skb_checksum_none_assert(skb);
 		skb->protocol = eth_type_trans(skb, netdev);
-		bytes += pktlen;
 
 		hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
 		if (hash != MTK_RXD4_FOE_ENTRY) {
@@ -1666,6 +1738,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			  &dim_sample);
 	net_dim(&eth->rx_dim, dim_sample);
 
+	if (xdp_flush)
+		xdp_do_flush_map();
+
 	return done;
 }

@@ -2011,7 +2086,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 	if (!ring->data)
 		return -ENOMEM;
 
-	if (!eth->hwlro) {
+	if (mtk_page_pool_enabled(eth)) {
 		struct page_pool *pp;
 
 		pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
@@ -2750,6 +2825,48 @@ static int mtk_stop(struct net_device *dev)
 	return 0;
 }
 
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+			 struct netlink_ext_ack *extack)
+{
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+	struct bpf_prog *old_prog;
+	bool need_update;
+
+	if (eth->hwlro) {
+		NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+		return -EOPNOTSUPP;
+	}
+
+	if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+		return -EOPNOTSUPP;
+	}
+
+	need_update = !!eth->prog != !!prog;
+	if (netif_running(dev) && need_update)
+		mtk_stop(dev);
+
+	old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	if (netif_running(dev) && need_update)
+		return mtk_open(dev);
+
+	return 0;
+}
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+	default:
+		return -EINVAL;
+	}
+}
+
 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
 {
 	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
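
Userspace reaches mtk_xdp_setup() through the .ndo_bpf hook installed below. A minimal libbpf loader sketch follows; the object file xdp_demo.o, the program name xdp_demo, and the interface eth0 are assumptions of the example, not part of the commit.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative libbpf loader; assumes libbpf 1.0 semantics. */
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex("eth0");

	if (!ifindex)
		return 1;

	obj = bpf_object__open_file("xdp_demo.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "xdp_demo");
	if (!prog)
		return 1;

	/* Request native (driver) mode so the kernel calls the device's
	 * .ndo_bpf, which this commit wires up to mtk_xdp().
	 */
	if (bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			   XDP_FLAGS_DRV_MODE, NULL)) {
		fprintf(stderr, "attach failed\n");
		return 1;
	}

	return 0;
}

The iproute2 equivalent should be: ip link set dev eth0 xdpdrv obj xdp_demo.o sec xdp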
@@ -3045,6 +3162,12 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
 	struct mtk_eth *eth = mac->hw;
 	u32 mcr_cur, mcr_new;
 
+	if (rcu_access_pointer(eth->prog) &&
+	    length > MTK_PP_MAX_BUF_SIZE) {
+		netdev_err(dev, "Invalid MTU for XDP mode\n");
+		return -EINVAL;
+	}
+
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 		mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
@@ -3372,6 +3495,7 @@ static const struct net_device_ops mtk_netdev_ops = {
 	.ndo_poll_controller	= mtk_poll_controller,
 #endif
 	.ndo_setup_tc		= mtk_eth_setup_tc,
+	.ndo_bpf		= mtk_xdp,
 };
 
 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
drivers/net/ethernet/mediatek/mtk_eth_soc.h  +2 −0
@@ -1088,6 +1088,8 @@ struct mtk_eth {

 	struct mtk_ppe			*ppe;
 	struct rhashtable		flow_table;
+
+	struct bpf_prog			__rcu *prog;
 };
 
 /* struct mtk_mac -	the structure that holds the info about the MACs of the
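
The __rcu annotation on the new prog field ties the two sides of the commit together: mtk_xdp_run() reads the pointer under rcu_read_lock() in NAPI context, while mtk_xdp_setup() replaces it under RTNL. A condensed sketch of that pairing (illustrative only; demo_prog and the function names stand in for the real field and call sites):

// SPDX-License-Identifier: GPL-2.0
/* Illustrative reader/updater pairing, mirroring eth->prog usage. */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

static struct bpf_prog __rcu *demo_prog;

/* Reader, e.g. from NAPI poll: pin the program for the critical
 * section only; no refcount is taken, RCU keeps it alive.
 */
static bool demo_prog_attached(void)
{
	struct bpf_prog *p;
	bool attached;

	rcu_read_lock();
	p = rcu_dereference(demo_prog);
	attached = !!p;
	rcu_read_unlock();

	return attached;
}

/* Updater, under RTNL as in mtk_xdp_setup(): swap the pointer, then
 * drop the reference held on the old program.
 */
static void demo_prog_replace(struct bpf_prog *new_prog)
{
	struct bpf_prog *old_prog;

	old_prog = rcu_replace_pointer(demo_prog, new_prog,
				       lockdep_rtnl_is_held());
	if (old_prog)
		bpf_prog_put(old_prog);
}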