Commit 8def0be8 authored by Sven Eckelmann, committed by Simon Wunderlich
Browse files

batman-adv: Consume skb in batadv_frag_send_packet



Sending functions in Linux consume the supplied skbuff. Doing the same in
batadv_frag_send_packet avoids the hack of returning -1 (-EPERM) to signal
the caller that it is responsible for cleaning up the skb.

Signed-off-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
parent eaac2c87
Loading
Loading
Loading
Loading
+29 −21
Original line number Original line Diff line number Diff line
@@ -20,6 +20,7 @@


#include <linux/atomic.h>
#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if_ether.h>
@@ -441,8 +442,7 @@ static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
 * @orig_node: final destination of the created fragments
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 *
 * Return: the netdev tx status or -1 in case of error.
 * Return: the netdev tx status or a negative errno code on a failure
 * When -1 is returned the skb is not consumed.
 */
 */
int batadv_frag_send_packet(struct sk_buff *skb,
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_orig_node *orig_node,
@@ -455,7 +455,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, max_packet_size;
	unsigned int max_fragment_size, max_packet_size;
	int ret = -1;
	int ret;


	/* To avoid merge and refragmentation at next-hops we never send
	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -465,13 +465,17 @@ int batadv_frag_send_packet(struct sk_buff *skb,
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
	max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;


	/* Don't even try to fragment, if we need more than 16 fragments */
	/* Don't even try to fragment, if we need more than 16 fragments */
	if (skb->len > max_packet_size)
	if (skb->len > max_packet_size) {
		goto out;
		ret = -EAGAIN;
		goto free_skb;
	}


	bat_priv = orig_node->bat_priv;
	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
	if (!primary_if) {
		goto out;
		ret = -EINVAL;
		goto put_primary_if;
	}


	/* Create one header to be copied to all fragments */
	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.packet_type = BATADV_UNICAST_FRAG;
@@ -496,34 +500,35 @@ int batadv_frag_send_packet(struct sk_buff *skb,
	/* Eat and send fragments from the tail of skb */
	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
	while (skb->len > max_fragment_size) {
		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
		if (!skb_fragment)
		if (!skb_fragment) {
			goto out;
			ret = -ENOMEM;
			goto free_skb;
		}


		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
				   skb_fragment->len + ETH_HLEN);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
		if (ret != NET_XMIT_SUCCESS) {
			/* return -1 so that the caller can free the original
			ret = NET_XMIT_DROP;
			 * skb
			goto free_skb;
			 */
			ret = -1;
			goto out;
		}
		}


		frag_header.no++;
		frag_header.no++;


		/* The initial check in this function should cover this case */
		/* The initial check in this function should cover this case */
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
		if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
			ret = -1;
			ret = -EINVAL;
			goto out;
			goto free_skb;
		}
		}
	}
	}


	/* Make room for the fragment header. */
	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
		goto out;
		ret = -ENOMEM;
		goto free_skb;
	}


	memcpy(skb->data, &frag_header, header_size);
	memcpy(skb->data, &frag_header, header_size);


@@ -532,10 +537,13 @@ int batadv_frag_send_packet(struct sk_buff *skb,
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	/* skb was consumed */
	skb = NULL;


out:
put_primary_if:
	if (primary_if)
	batadv_hardif_put(primary_if);
	batadv_hardif_put(primary_if);
free_skb:
	kfree_skb(skb);


	return ret;
	return ret;
}
}