MAINTAINERS  (+0 −1)

@@ -10770,7 +10770,6 @@ L7 BPF FRAMEWORK
 M:	John Fastabend <john.fastabend@gmail.com>
 M:	Daniel Borkmann <daniel@iogearbox.net>
 M:	Jakub Sitnicki <jakub@cloudflare.com>
-M:	Lorenz Bauer <lmb@cloudflare.com>
 L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Maintained
drivers/net/ethernet/broadcom/genet/bcmgenet.c  (+2 −2)

@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		__raw_writel(value, offset);
 	else
-		writel_relaxed(value, offset);
+		writel(value, offset);
 }
 
 static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		return __raw_readl(offset);
 	else
-		return readl_relaxed(offset);
+		return readl(offset);
 }
 
 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
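Why the stronger accessors matter: writel() orders prior stores to normal (e.g. DMA descriptor) memory before the MMIO write, and readl() orders the MMIO read before subsequent reads of DMA'd data, while the _relaxed variants give no such ordering against normal memory. On a weakly ordered CPU a relaxed doorbell write could therefore become visible to the device before the descriptor it refers to. The sketch below is illustrative only, not taken from the bcmgenet driver; the function and names (ring_doorbell, desc, doorbell) are hypothetical.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: publish a descriptor in DMA-coherent memory, then
 * notify the device through an MMIO doorbell register.
 */
static void ring_doorbell(u32 *desc, void __iomem *doorbell, u32 val)
{
	desc[0] = val;		/* store to DMA-coherent memory */

	/* writel() guarantees the descriptor store above is visible to the
	 * device before the doorbell write reaches it; writel_relaxed()
	 * would not order against normal memory at all.
	 */
	writel(1, doorbell);
}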
drivers/net/ethernet/ibm/ibmvnic.c  (+50 −13)

@@ -1430,6 +1430,15 @@ static int __ibmvnic_open(struct net_device *netdev)
 			return rc;
 	}
 
+	adapter->tx_queues_active = true;
+
+	/* Since queues were stopped until now, there shouldn't be any
+	 * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
+	 * don't need the synchronize_rcu()? Leaving it for consistency
+	 * with setting ->tx_queues_active = false.
+	 */
+	synchronize_rcu();
+
 	netif_tx_start_all_queues(netdev);
 
 	if (prev_state == VNIC_CLOSED) {
@@ -1604,6 +1613,14 @@ static void ibmvnic_cleanup(struct net_device *netdev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
 	/* ensure that transmissions are stopped if called by do_reset */
+	adapter->tx_queues_active = false;
+
+	/* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
+	 * update so they don't restart a queue after we stop it below.
+	 */
+	synchronize_rcu();
+
 	if (test_bit(0, &adapter->resetting))
 		netif_tx_disable(netdev);
 	else
@@ -1843,15 +1860,22 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 		tx_buff->skb = NULL;
 		adapter->netdev->stats.tx_dropped++;
 	}
 	ind_bufp->index = 0;
 	if (atomic_sub_return(entries, &tx_scrq->used) <=
 	    (adapter->req_tx_entries_per_subcrq / 2) &&
-	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
-	    !test_bit(0, &adapter->resetting)) {
-		netif_wake_subqueue(adapter->netdev, queue_num);
-		netdev_dbg(adapter->netdev, "Started queue %d\n", queue_num);
+	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+		rcu_read_lock();
+		if (adapter->tx_queues_active) {
+			netif_wake_subqueue(adapter->netdev, queue_num);
+			netdev_dbg(adapter->netdev, "Started queue %d\n", queue_num);
+		}
+		rcu_read_unlock();
 	}
 }
 
 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
@@ -1905,11 +1929,12 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	int index = 0;
 	u8 proto = 0;
 
-	tx_scrq = adapter->tx_scrq[queue_num];
-	txq = netdev_get_tx_queue(netdev, queue_num);
-	ind_bufp = &tx_scrq->ind_buf;
-
-	if (test_bit(0, &adapter->resetting)) {
+	/* If a reset is in progress, drop the packet since
+	 * the scrqs may get torn down. Otherwise use the
+	 * rcu to ensure reset waits for us to complete.
+	 */
+	rcu_read_lock();
+	if (!adapter->tx_queues_active) {
 		dev_kfree_skb_any(skb);
 		tx_send_failed++;
@@ -1918,6 +1943,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		goto out;
 	}
 
+	tx_scrq = adapter->tx_scrq[queue_num];
+	txq = netdev_get_tx_queue(netdev, queue_num);
+	ind_bufp = &tx_scrq->ind_buf;
+
 	if (ibmvnic_xmit_workarounds(skb, netdev)) {
 		tx_dropped++;
 		tx_send_failed++;
@@ -1925,6 +1954,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 		goto out;
 	}
+
 	if (skb_is_gso(skb))
 		tx_pool = &adapter->tso_pool[queue_num];
 	else
@@ -2079,6 +2109,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		netif_carrier_off(netdev);
 	}
 out:
+	rcu_read_unlock();
 	netdev->stats.tx_dropped += tx_dropped;
 	netdev->stats.tx_bytes += tx_bytes;
 	netdev->stats.tx_packets += tx_packets;
@@ -3749,9 +3780,15 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 		    (adapter->req_tx_entries_per_subcrq / 2) &&
 		    __netif_subqueue_stopped(adapter->netdev, scrq->pool_index)) {
-			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
-			netdev_dbg(adapter->netdev, "Started queue %d\n",
-				   scrq->pool_index);
+			rcu_read_lock();
+			if (adapter->tx_queues_active) {
+				netif_wake_subqueue(adapter->netdev, scrq->pool_index);
+				netdev_dbg(adapter->netdev, "Started queue %d\n",
+					   scrq->pool_index);
+			}
+			rcu_read_unlock();
 		}
 	}
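The tx_queues_active flag introduced above follows a common RCU pattern: the open/close paths write the flag and call synchronize_rcu(), while the transmit and completion paths read it inside an RCU read-side critical section, so the reset path cannot get past synchronize_rcu() while a reader that observed the old value is still in flight. Below is a minimal sketch of that pattern, not the driver's own code; the names (struct my_adapter, my_stop_tx, my_try_wake) are hypothetical.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

struct my_adapter {
	struct net_device *netdev;
	bool tx_active;			/* written only by open/close paths */
};

/* Writer side: clear the flag, then wait out all current readers before
 * stopping the queues, so no reader can re-wake a queue afterwards.
 */
static void my_stop_tx(struct my_adapter *a)
{
	a->tx_active = false;
	synchronize_rcu();	/* wait for readers that may have seen tx_active == true */
	netif_tx_disable(a->netdev);
}

/* Reader side: only wake a queue while the flag is known to be true. */
static void my_try_wake(struct my_adapter *a, int queue)
{
	rcu_read_lock();
	if (a->tx_active)
		netif_wake_subqueue(a->netdev, queue);
	rcu_read_unlock();
}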
drivers/net/ethernet/ibm/ibmvnic.h  (+5 −2)

@@ -1006,11 +1006,14 @@ struct ibmvnic_adapter {
 	struct work_struct ibmvnic_reset;
 	struct delayed_work ibmvnic_delayed_reset;
 	unsigned long resetting;
-	bool napi_enabled, from_passive_init;
-	bool login_pending;
 	/* last device reset time */
 	unsigned long last_reset_time;
 
+	bool napi_enabled;
+	bool from_passive_init;
+	bool login_pending;
+	/* protected by rcu */
+	bool tx_queues_active;
 	bool failover_pending;
 	bool force_reset_recovery;
drivers/net/ethernet/intel/ice/ice.h  (+2 −0)

@@ -290,6 +290,7 @@ enum ice_pf_state {
 	ICE_LINK_DEFAULT_OVERRIDE_PENDING,
 	ICE_PHY_INIT_COMPLETE,
 	ICE_FD_VF_FLUSH_CTX,		/* set at FD Rx IRQ or timeout */
+	ICE_AUX_ERR_PENDING,
 	ICE_STATE_NBITS			/* must be last */
 };
@@ -557,6 +558,7 @@ struct ice_pf {
 	wait_queue_head_t reset_wait_queue;
 
 	u32 hw_csum_rx_error;
+	u32 oicr_err_reg;
 	u16 oicr_idx;		/* Other interrupt cause MSIX vector index */
 	u16 num_avail_sw_msix;	/* remaining MSIX SW vectors left unclaimed */
 	u16 max_pf_txqs;	/* Total Tx queues PF wide */