Commit 87eff2ec authored by Eric Dumazet, committed by David S. Miller
Browse files

net: optimize napi_threaded_poll() vs RPS/RFS



We use napi_threaded_poll() in order to reduce our softirq dependency.

We can add a followup of 821eba96 ("net: optimize napi_schedule_rps()")
to further remove the need of firing NET_RX_SOFTIRQ whenever
RPS/RFS are used.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a1aaee7f
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -3194,7 +3194,10 @@ struct softnet_data {
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;
#endif

	bool			in_net_rx_action;
	bool			in_napi_threaded_poll;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
+10 −2
Original line number Diff line number Diff line
@@ -4603,10 +4603,10 @@ static void napi_schedule_rps(struct softnet_data *sd)
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		/* If not called from net_rx_action()
		/* If not called from net_rx_action() or napi_threaded_poll()
		 * we have to raise NET_RX_SOFTIRQ.
		 */
		if (!mysd->in_net_rx_action)
		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return;
	}
@@ -6631,11 +6631,19 @@ static int napi_threaded_poll(void *data)

			local_bh_disable();
			sd = this_cpu_ptr(&softnet_data);
			sd->in_napi_threaded_poll = true;

			have = netpoll_poll_lock(napi);
			__napi_poll(napi, &repoll);
			netpoll_poll_unlock(have);

			sd->in_napi_threaded_poll = false;
			barrier();

			if (sd_has_rps_ipi_waiting(sd)) {
				local_irq_disable();
				net_rps_action_and_irq_enable(sd);
			}
			skb_defer_free_flush(sd);
			local_bh_enable();