Commit 931e93bd authored by Eric Dumazet, committed by David S. Miller

net: do not provide hard irq safety for sd->defer_lock



kfree_skb() can be called from hard irq handlers,
but skb_attempt_defer_free() is meant to be used
from process or BH contexts, and skb_defer_free_flush()
is meant to be called from BH contexts.

Not having to mask hard irq can save some cycles.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e8e1ce84
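
The change relies on a standard locking rule: a spinlock that is never taken from hard irq context does not need the _irq/_irqsave variants. The producer side only has to disable BH, and the consumer side, already running in BH context, can take the lock plainly. Below is a minimal sketch of that rule using hypothetical names (example_lock, example_queue, example_flush); it illustrates the pattern and is not code from this patch.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical example, not part of the patch: a lock only ever taken
 * from process or BH context can skip the hard-irq-masking variants.
 */
static DEFINE_SPINLOCK(example_lock);
static struct sk_buff *example_list;

/* Producer: called from process or BH context, never from a hard irq
 * handler. Disabling BH keeps the flush softirq from running on this
 * CPU while the lock is held; hard irqs can stay enabled because no
 * hard irq handler takes this lock.
 */
static void example_queue(struct sk_buff *skb)
{
	spin_lock_bh(&example_lock);
	skb->next = example_list;
	example_list = skb;
	spin_unlock_bh(&example_lock);
}

/* Consumer: runs in BH (softirq) context. Softirqs do not nest and
 * process context cannot preempt us here, so a plain spin_lock() is
 * sufficient.
 */
static void example_flush(void)
{
	struct sk_buff *skb, *next;

	spin_lock(&example_lock);
	skb = example_list;
	example_list = NULL;
	spin_unlock(&example_lock);

	while (skb) {
		next = skb->next;
		kfree_skb(skb);
		skb = next;
	}
}
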
net/core/dev.c  +2 −2
@@ -6632,11 +6632,11 @@ static void skb_defer_free_flush(struct softnet_data *sd)
 	if (!READ_ONCE(sd->defer_list))
 		return;
 
-	spin_lock_irq(&sd->defer_lock);
+	spin_lock(&sd->defer_lock);
 	skb = sd->defer_list;
 	sd->defer_list = NULL;
 	sd->defer_count = 0;
-	spin_unlock_irq(&sd->defer_lock);
+	spin_unlock(&sd->defer_lock);
 
 	while (skb != NULL) {
 		next = skb->next;
net/core/skbuff.c  +2 −3
@@ -6870,7 +6870,6 @@ void skb_attempt_defer_free(struct sk_buff *skb)
 {
 	int cpu = skb->alloc_cpu;
 	struct softnet_data *sd;
-	unsigned long flags;
 	unsigned int defer_max;
 	bool kick;
 
@@ -6889,7 +6888,7 @@ nodefer: __kfree_skb(skb);
 	if (READ_ONCE(sd->defer_count) >= defer_max)
 		goto nodefer;
 
-	spin_lock_irqsave(&sd->defer_lock, flags);
+	spin_lock_bh(&sd->defer_lock);
 	/* Send an IPI every time queue reaches half capacity. */
 	kick = sd->defer_count == (defer_max >> 1);
 	/* Paired with the READ_ONCE() few lines above */
@@ -6898,7 +6897,7 @@ nodefer: __kfree_skb(skb);
 	skb->next = sd->defer_list;
 	/* Paired with READ_ONCE() in skb_defer_free_flush() */
 	WRITE_ONCE(sd->defer_list, skb);
-	spin_unlock_irqrestore(&sd->defer_lock, flags);
+	spin_unlock_bh(&sd->defer_lock);
 
 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
 	 * if we are unlucky enough (this seems very unlikely).
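
Nothing in these hunks enforces the calling-context assumptions from the changelog; the safety of spin_lock_bh()/spin_lock() rests on skb_attempt_defer_free() never running in hard irq context and skb_defer_free_flush() only running in BH context. As a hedged sketch, not part of this commit and with hypothetical wrapper names, those assumptions could be spelled out with existing debug helpers:

#include <linux/bug.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

/* Hypothetical wrappers, not in this patch: they only assert the
 * contexts that make the non-irq-masking lock variants safe.
 */
static inline void defer_lock_producer(spinlock_t *lock)
{
	WARN_ON_ONCE(in_hardirq());	/* callers are process or BH context */
	spin_lock_bh(lock);
}

static inline void defer_lock_consumer(spinlock_t *lock)
{
	lockdep_assert_in_softirq();	/* flush only runs in BH context */
	spin_lock(lock);
}
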