Commit 10d6393d authored by Julian Wiedmann, committed by David S. Miller
Browse files

net/af_iucv: support drop monitoring



Change the good paths to use consume_skb() instead of kfree_skb(). This
avoids flooding dropwatch with false-positives.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 00335237
Loading
Loading
Loading
Loading
+22 −20
Original line number Diff line number Diff line
@@ -1044,7 +1044,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			if (err == 0) {
				atomic_dec(&iucv->skbs_in_xmit);
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
				consume_skb(skb);
			}

			/* this error should never happen since the	*/
@@ -1293,7 +1293,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			}
		}

		kfree_skb(skb);
		consume_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
@@ -1756,7 +1756,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
	spin_unlock_irqrestore(&list->lock, flags);

	if (this) {
		kfree_skb(this);
		consume_skb(this);
		/* wake up any process waiting for sending */
		iucv_sock_wake_msglim(sk);
	}
@@ -1903,17 +1903,17 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	if (!iucv || sk->sk_state != IUCV_BOUND) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

@@ -1924,16 +1924,16 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	if (!iucv || sk->sk_state != IUCV_BOUND) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

@@ -1945,16 +1945,18 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

@@ -2107,7 +2109,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			consume_skb(skb);
			break;
		}
		fallthrough;	/* and receive non-zero length data */