include/linux/if_packet.h (+0 −2)

--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -48,13 +48,11 @@ struct sockaddr_ll
 #define PACKET_RESERVE		12
 #define PACKET_TX_RING		13
 #define PACKET_LOSS		14
-#define PACKET_GAPDATA		15
 
 struct tpacket_stats
 {
 	unsigned int	tp_packets;
 	unsigned int	tp_drops;
-	unsigned int	tp_gap;
 };
 
 struct tpacket_auxdata
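For context: with tp_gap gone from struct tpacket_stats, aggregate loss is still observable through the existing PACKET_STATISTICS socket option. A minimal userspace sketch, not part of this patch; the socket setup is illustrative and error handling is trimmed:

#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	/* AF_PACKET sockets require CAP_NET_RAW (typically root). */
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (fd < 0)
		return 1;

	/* ... receive traffic for a while ... */

	/* PACKET_STATISTICS returns the counters and resets them. */
	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("packets=%u drops=%u\n", st.tp_packets, st.tp_drops);
	return 0;
}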
net/packet/af_packet.c (+0 −33)

--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -524,31 +524,6 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 	return res;
 }
 
-/*
- * If we've lost frames since the last time we queued one to the
- * sk_receive_queue, we need to record it here.
- * This must be called under the protection of the socket lock
- * to prevent racing with other softirqs and user space
- */
-static inline void record_packet_gap(struct sk_buff *skb,
-				     struct packet_sock *po)
-{
-	/*
-	 * We overload the mark field here, since we're about
-	 * to enqueue to a receive queue and no body else will
-	 * use this field at this point
-	 */
-	skb->mark = po->stats.tp_gap;
-	po->stats.tp_gap = 0;
-	return;
-}
-
-static inline __u32 check_packet_gap(struct sk_buff *skb)
-{
-	return skb->mark;
-}
-
 /*
    This function makes lazy skb cloning in hope that most of packets
    are discarded by BPF.
@@ -652,7 +627,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_packets++;
-	record_packet_gap(skb, po);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
 	sk->sk_data_ready(sk, skb->len);
@@ -661,7 +635,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -839,7 +812,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 ring_is_full:
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 	sk->sk_data_ready(sk, 0);
@@ -1449,7 +1421,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb;
 	int copied, err;
 	struct sockaddr_ll *sll;
-	__u32 gap;
 
 	err = -EINVAL;
 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1528,10 +1499,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
 
-	gap = check_packet_gap(skb);
-	if (gap)
-		put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);
-
 	/*
 	 *	Free or return the buffer as appropriate. Again this
 	 *	hides all the races and re-entrancy issues from us.
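For reference, this is roughly how a userspace consumer of the reverted PACKET_GAPDATA ancillary message would have looked. After this patch the kernel never emits this cmsg, so the constant is defined locally here purely for illustration; the helper name read_one and buffer sizes are arbitrary:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/types.h>
#include <linux/if_packet.h>

#ifndef PACKET_GAPDATA
#define PACKET_GAPDATA	15	/* removed by this patch */
#endif

static void read_one(int fd)
{
	char buf[2048];
	char cbuf[CMSG_SPACE(sizeof(__u32))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	/* Walk the ancillary data looking for the (removed) gap count. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_GAPDATA) {
			__u32 gap;

			memcpy(&gap, CMSG_DATA(cmsg), sizeof(gap));
			printf("%u frame(s) dropped before this one\n", gap);
		}
	}
}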