Commit a6bf5703 authored by Haiyang Zhang, committed by David S. Miller
Browse files

net: mana: Reuse XDP dropped page



Reuse the dropped page in RX path to save page allocation
overhead.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d356abb9
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -310,6 +310,7 @@ struct mana_rxq {

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	struct page *xdp_save_page;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
+13 −2
Original line number Diff line number Diff line
@@ -1059,7 +1059,9 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
	u64_stats_update_end(&rx_stats->syncp);

drop:
	free_page((unsigned long)buf_va);
	WARN_ON_ONCE(rxq->xdp_save_page);
	rxq->xdp_save_page = virt_to_page(buf_va);

	++ndev->stats.rx_dropped;

	return;
@@ -1116,7 +1118,13 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
	rxbuf_oob = &rxq->rx_oobs[curr];
	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

	/* Reuse XDP dropped page if available */
	if (rxq->xdp_save_page) {
		new_page = rxq->xdp_save_page;
		rxq->xdp_save_page = NULL;
	} else {
		new_page = alloc_page(GFP_ATOMIC);
	}

	if (new_page) {
		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
@@ -1403,6 +1411,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,

	mana_deinit_cq(apc, &rxq->rx_cq);

	if (rxq->xdp_save_page)
		__free_page(rxq->xdp_save_page);

	for (i = 0; i < rxq->num_rx_buf; i++) {
		rx_oob = &rxq->rx_oobs[i];