Patch summary: include/linux/netdevice.h (+10 −0), net/core/netpoll.c (+8 −0)

--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -261,6 +261,8 @@ enum netdev_state_t
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_QDISC_RUNNING,
+	/* Set by the netpoll NAPI code */
+	__LINK_STATE_POLL_LIST_FROZEN,
 };
@@ -1014,6 +1016,14 @@ static inline void netif_rx_complete(struct net_device *dev)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_NETPOLL
+	/* Prevent race with netpoll - yes, this is a kludge.
+	 * But at least it doesn't penalize the non-netpoll
+	 * code path. */
+	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
+		return;
+#endif
 	local_irq_save(flags);
 	__netif_rx_complete(dev);
 	local_irq_restore(flags);

--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -124,6 +124,13 @@ static void poll_napi(struct netpoll *np)
 	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
 	    npinfo->poll_owner != smp_processor_id() &&
 	    spin_trylock(&npinfo->poll_lock)) {
+		/* When calling dev->poll from poll_napi, we may end up in
+		 * netif_rx_complete. However, only the CPU to which the
+		 * device was queued is allowed to remove it from poll_list.
+		 * Setting POLL_LIST_FROZEN tells netif_rx_complete
+		 * to leave the NAPI state alone.
+		 */
+		set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		npinfo->rx_flags |= NETPOLL_RX_DROP;
 		atomic_inc(&trapped);
@@ -131,6 +138,7 @@ static void poll_napi(struct netpoll *np)
 		atomic_dec(&trapped);
 		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+		clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		spin_unlock(&npinfo->poll_lock);
 	}
 }
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -261,6 +261,8 @@ enum netdev_state_t
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_QDISC_RUNNING,
+	/* Set by the netpoll NAPI code */
+	__LINK_STATE_POLL_LIST_FROZEN,
 };
@@ -1014,6 +1016,14 @@ static inline void netif_rx_complete(struct net_device *dev)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_NETPOLL
+	/* Prevent race with netpoll - yes, this is a kludge.
+	 * But at least it doesn't penalize the non-netpoll
+	 * code path. */
+	if (test_bit(__LINK_STATE_POLL_LIST_FROZEN, &dev->state))
+		return;
+#endif
 	local_irq_save(flags);
 	__netif_rx_complete(dev);
 	local_irq_restore(flags);
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -124,6 +124,13 @@ static void poll_napi(struct netpoll *np)
 	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
 	    npinfo->poll_owner != smp_processor_id() &&
 	    spin_trylock(&npinfo->poll_lock)) {
+		/* When calling dev->poll from poll_napi, we may end up in
+		 * netif_rx_complete. However, only the CPU to which the
+		 * device was queued is allowed to remove it from poll_list.
+		 * Setting POLL_LIST_FROZEN tells netif_rx_complete
+		 * to leave the NAPI state alone.
+		 */
+		set_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		npinfo->rx_flags |= NETPOLL_RX_DROP;
 		atomic_inc(&trapped);
@@ -131,6 +138,7 @@ static void poll_napi(struct netpoll *np)
 		atomic_dec(&trapped);
 		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+		clear_bit(__LINK_STATE_POLL_LIST_FROZEN, &np->dev->state);
 		spin_unlock(&npinfo->poll_lock);
 	}
 }