include/linux/netdevice.h (+0 −3)

@@ -1113,9 +1113,6 @@ struct softnet_data
 	struct sk_buff		*completion_queue;
 
 	struct napi_struct	backlog;
-#ifdef CONFIG_NET_DMA
-	struct dma_chan		*net_dma;
-#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
include/net/netdma.h (+0 −11)

@@ -24,17 +24,6 @@
 #include <linux/dmaengine.h>
 #include <linux/skbuff.h>
 
-static inline struct dma_chan *get_softnet_dma(void)
-{
-	struct dma_chan *chan;
-
-	rcu_read_lock();
-	chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
-	rcu_read_unlock();
-
-	return chan;
-}
-
 int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
 		struct sk_buff *skb, int offset, struct iovec *to,
 		size_t len, struct dma_pinned_list *pinned_list);
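The helper removed above is superseded by dma_find_channel() from the dmaengine core: instead of each client publishing a per-cpu channel pointer that readers dereference under RCU, callers ask the core for the channel it has assigned to the current CPU for a given operation type. A minimal sketch of the call-site migration, assuming only the API visible in this diff (demo_grab_channel() is a hypothetical stand-in for the TCP callers below):

	#include <linux/dmaengine.h>
	#include <net/tcp.h>

	/* Hypothetical stand-in for the tcp_recvmsg() and
	 * tcp_dma_try_early_copy() call sites changed below;
	 * only the lookup line matters here. */
	static void demo_grab_channel(struct tcp_sock *tp)
	{
		/* Old (removed): tp->ucopy.dma_chan = get_softnet_dma();
		 *   reads a per-cpu pointer published by the net_dma
		 *   client, under rcu_read_lock().
		 * New: ask the dmaengine core for this CPU's memcpy
		 * channel; no RCU locking is needed at the call site. */
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
	}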
net/core/dev.c (+0 −40)

@@ -4827,44 +4827,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 }
 
 #ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes. The net_dma client tries to have one DMA channel per CPU.
- */
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
 /**
  * netdev_dma_event - event callback for the net_dma_client
  * @client: should always be net_dma_client

@@ -4894,7 +4856,6 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 			ack = DMA_ACK;
 			net_dma->channels[pos] = chan;
 			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
 		}
 		break;
 	case DMA_RESOURCE_REMOVED:

@@ -4909,7 +4870,6 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 			ack = DMA_ACK;
 			cpu_clear(pos, net_dma->channel_mask);
 			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
 		}
 		break;
 	default:
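The deleted net_dma_rebalance() spread channels over CPUs as evenly as possible: each of the nr_chan channels covers nr_cpus / nr_chan CPUs, and the first nr_cpus % nr_chan channels take one extra. A self-contained sketch of just that arithmetic (plain userspace C for illustration; the function and variable names are not kernel API):

	#include <stdio.h>

	/* How many CPUs channel i serves, mirroring the removed
	 * net_dma_rebalance() arithmetic: an even share, plus one
	 * extra CPU for the first (nr_cpus % nr_chan) channels. */
	static unsigned int cpus_for_channel(unsigned int i,
					     unsigned int nr_cpus,
					     unsigned int nr_chan)
	{
		return nr_cpus / nr_chan + (i < nr_cpus % nr_chan ? 1 : 0);
	}

	int main(void)
	{
		unsigned int nr_cpus = 8, nr_chan = 3, i;

		for (i = 0; i < nr_chan; i++)
			printf("channel %u -> %u cpus\n",
			       i, cpus_for_channel(i, nr_cpus, nr_chan));
		/* Prints 3, 3, 2 -- all 8 CPUs covered. */
		return 0;
	}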
net/ipv4/tcp.c (+2 −2)

@@ -1317,7 +1317,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	if ((available < target) &&
 	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 	    !sysctl_tcp_low_latency &&
-	    __get_cpu_var(softnet_data).net_dma) {
+	    dma_find_channel(DMA_MEMCPY)) {
 		preempt_enable_no_resched();
 		tp->ucopy.pinned_list =
 				dma_pin_iovec_pages(msg->msg_iov, len);

@@ -1527,7 +1527,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = get_softnet_dma();
+				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
net/ipv4/tcp_input.c (+1 −1)

@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
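After this change both TCP receive-path call sites share one consumer pattern: look up the core's per-cpu memcpy channel on demand, then offload the skb-to-iovec copy through it. A condensed sketch of that pattern, using only functions appearing in this diff (demo_early_copy() itself is hypothetical, not the exact kernel code):

	#include <linux/dmaengine.h>
	#include <net/netdma.h>
	#include <net/tcp.h>

	/* Hypothetical condensation of the pattern now used by
	 * tcp_recvmsg() and tcp_dma_try_early_copy(). */
	static int demo_early_copy(struct tcp_sock *tp, struct sk_buff *skb,
				   size_t len)
	{
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);

		if (!tp->ucopy.dma_chan)
			return -ENODEV;	/* caller falls back to a CPU copy */

		/* Offload the copy; signature as declared in
		 * include/net/netdma.h above. */
		tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
				tp->ucopy.dma_chan, skb, 0,
				tp->ucopy.iov, len, tp->ucopy.pinned_list);
		return 0;
	}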