Commit d094c985 authored by Lorenzo Bianconi, committed by Alexei Starovoitov

net: mvneta: simplify mvneta_swbm_add_rx_fragment management

Relying on the xdp frags bit, remove the skb_shared_info structure
allocated on the stack in the mvneta_rx_swbm routine and simplify
mvneta_swbm_add_rx_fragment to access the skb_shared_info in the
xdp_buff structure directly. There is no performance penalty with this
approach since mvneta_swbm_add_rx_fragment runs only for the xdp frags
use case.
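
As a minimal sketch of the resulting pattern (frag_bytes() is a
hypothetical helper for illustration only, not part of the driver;
xdp_get_shared_info_from_buff(), xdp_buff_has_frags() and
skb_frag_size() are existing helpers from include/net/xdp.h and
include/linux/skbuff.h):

	#include <net/xdp.h>

	/* Hypothetical example: walk the fragments of an xdp_buff without a
	 * caller-supplied skb_shared_info. The shared info lives in the
	 * tailroom of the buffer itself, so callees can look it up on demand.
	 */
	static unsigned int frag_bytes(struct xdp_buff *xdp)
	{
		struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
		unsigned int bytes = 0;
		int i;

		/* The frags bit is the only indication that the tailroom
		 * shared info is initialized; a linear buffer never sets it.
		 */
		if (!xdp_buff_has_frags(xdp))
			return 0;

		for (i = 0; i < sinfo->nr_frags; i++)
			bytes += skb_frag_size(&sinfo->frags[i]);

		return bytes;
	}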

Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/45f050c094ccffce49d6bc5112939ed35250ba90.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 76a67694
drivers/net/ethernet/marvell/mvneta.c: +15 −27
@@ -2060,9 +2060,9 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)

 static void
 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
-		    struct xdp_buff *xdp, struct skb_shared_info *sinfo,
-		    int sync_len)
+		    struct xdp_buff *xdp, int sync_len)
 {
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	int i;

 	if (likely(!xdp_buff_has_frags(xdp)))
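
For context, the whole function after this hunk reads roughly as below;
the frag-release loop and the final page_pool_put_page() sit past the
shown context and are reconstructed from the surrounding driver source,
so treat them as an approximation rather than part of this diff:

	static void
	mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			    struct xdp_buff *xdp, int sync_len)
	{
		/* shared info now comes from the xdp_buff tailroom instead
		 * of a caller-supplied pointer
		 */
		struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
		int i;

		/* linear buffers never initialize the tailroom shared info */
		if (likely(!xdp_buff_has_frags(xdp)))
			goto out;

		for (i = 0; i < sinfo->nr_frags; i++)
			page_pool_put_full_page(rxq->page_pool,
						skb_frag_page(&sinfo->frags[i]), true);

	out:
		page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
				   sync_len, true);
	}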
@@ -2210,7 +2210,6 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp,
 	       u32 frame_sz, struct mvneta_stats *stats)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	unsigned int len, data_len, sync;
 	u32 ret, act;

@@ -2231,7 +2230,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,

 		err = xdp_do_redirect(pp->dev, xdp, prog);
 		if (unlikely(err)) {
-			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
 			ret = MVNETA_XDP_DROPPED;
 		} else {
 			ret = MVNETA_XDP_REDIR;
@@ -2242,7 +2241,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	case XDP_TX:
 		ret = mvneta_xdp_xmit_back(pp, xdp);
 		if (ret != MVNETA_XDP_TX)
-			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(pp->dev, prog, act);
@@ -2251,7 +2250,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		trace_xdp_exception(pp->dev, prog, act);
 		fallthrough;
 	case XDP_DROP:
-		mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+		mvneta_xdp_put_buff(pp, rxq, xdp, sync);
 		ret = MVNETA_XDP_DROPPED;
 		stats->xdp_drop++;
 		break;
@@ -2303,9 +2302,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc,
 			    struct mvneta_rx_queue *rxq,
 			    struct xdp_buff *xdp, int *size,
-			    struct skb_shared_info *xdp_sinfo,
 			    struct page *page)
 {
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
 	int data_len, len;
@@ -2323,8 +2322,11 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 				len, dma_dir);
 	rx_desc->buf_phys_addr = 0;

-	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
-		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
+	if (!xdp_buff_has_frags(xdp))
+		sinfo->nr_frags = 0;
+
+	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];

 		skb_frag_off_set(frag, pp->rx_offset_correction);
 		skb_frag_size_set(frag, data_len);
@@ -2335,16 +2337,6 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 	} else {
 		page_pool_put_full_page(rxq->page_pool, page, true);
 	}

-	/* last fragment */
-	if (len == *size) {
-		struct skb_shared_info *sinfo;
-
-		sinfo = xdp_get_shared_info_from_buff(xdp);
-		sinfo->nr_frags = xdp_sinfo->nr_frags;
-		memcpy(sinfo->frags, xdp_sinfo->frags,
-		       sinfo->nr_frags * sizeof(skb_frag_t));
-	}
 	*size -= len;
 }
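
The block removed above is the heart of the simplification: fragments
used to be staged in the caller's on-stack skb_shared_info and only
copied into the xdp_buff's shared info on the last descriptor. The
accumulation now writes through the tailroom directly (same names as in
the hunk above, page attachment elided):

	/* first fragment: the frags bit is still clear, so the tailroom
	 * shared info is uninitialized and nr_frags must be reset in place
	 */
	if (!xdp_buff_has_frags(xdp))
		sinfo->nr_frags = 0;

	/* each fragment is appended straight to the shared info inside the
	 * xdp_buff; nothing is staged on the stack, so there is nothing
	 * left to copy when the last descriptor arrives
	 */
	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];

		skb_frag_off_set(frag, pp->rx_offset_correction);
		skb_frag_size_set(frag, data_len);
	}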

@@ -2392,7 +2384,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 {
 	int rx_proc = 0, rx_todo, refill, size = 0;
 	struct net_device *dev = pp->dev;
-	struct skb_shared_info sinfo;
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
 	u32 desc_status, frame_sz;
@@ -2401,8 +2392,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 	xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
 	xdp_buf.data_hard_start = NULL;

-	sinfo.nr_frags = 0;
-
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);

@@ -2444,7 +2433,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			}

 			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
-						    &size, &sinfo, page);
+						    &size, page);
 		} /* Middle or Last descriptor */

 		if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2452,7 +2441,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 			continue;

 		if (size) {
-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
 			goto next;
 		}

@@ -2464,7 +2453,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);

 			u64_stats_update_begin(&stats->syncp);
 			stats->es.skb_alloc_error++;
@@ -2481,11 +2470,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		napi_gro_receive(napi, skb);
 next:
 		xdp_buf.data_hard_start = NULL;
-		sinfo.nr_frags = 0;
 	}

 	if (xdp_buf.data_hard_start)
-		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);

 	if (ps.xdp_redirect)
 		xdp_do_flush_map();