Commit 8ba71dbb authored by David S. Miller

Merge branch 'skbuff-struct-group'
Kees Cook says:

====================
skbuff: Switch structure bounds to struct_group()

This is a pair of patches to add struct_group() to struct sk_buff. The
first is needed to work around sparse-specific complaints, and is new
for v2. The second patch is the same as originally sent as v1.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents cb902b33 03f61041
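
For readers unfamiliar with struct_group() (defined in include/linux/stddef.h): it wraps a run of struct members in an anonymous union so each field stays addressable by its own name while the whole run is also reachable as a single named member, which is what lets the memset()/memcpy() call sites below use sizeof() instead of open-coded offsetof() bounds. The following stand-alone sketch is illustrative only; the simplified macro, the example_buff struct, and its fields are invented for this example and are not part of the series.

/*
 * Stand-alone sketch (not from this commit): a simplified struct_group()
 * in the spirit of include/linux/stddef.h, applied to a made-up struct.
 */
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel macro: an anonymous union holding the
 * same members twice, once anonymously (direct access by field name) and
 * once as a named struct (whole-group access via NAME).
 */
#define struct_group(NAME, ...)			\
	union {					\
		struct { __VA_ARGS__ };		\
		struct { __VA_ARGS__ } NAME;	\
	}

struct example_buff {			/* hypothetical, not struct sk_buff */
	int refcount;			/* outside the group, never bulk-copied */
	struct_group(headers,		/* fields cleared/copied as one unit */
		int pkt_type;
		int hash;
	);
	int tail;			/* outside the group */
};

int main(void)
{
	struct example_buff src = { 0 }, dst = { 0 };

	src.pkt_type = 3;		/* members stay addressable by name */
	src.hash = 42;
	dst.refcount = 1;

	/* The compiler knows the group's bounds, so no offsetof() arithmetic. */
	memcpy(&dst.headers, &src.headers, sizeof(dst.headers));
	memset(&src.headers, 0, sizeof(src.headers));

	printf("dst: pkt_type=%d hash=%d refcount=%d\n",
	       dst.pkt_type, dst.hash, dst.refcount);
	return 0;
}
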
+1 −3
@@ -79,9 +79,7 @@ static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
-	memset(&skb->headers_start, 0,
-	       offsetof(struct sk_buff, headers_end) -
-		       offsetof(struct sk_buff, headers_start));
+	memset(&skb->headers, 0, sizeof(skb->headers));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
+22 −24
@@ -795,7 +795,7 @@ struct sk_buff {
#else
#define CLONED_MASK	1
#endif
-#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)
+#define CLONED_OFFSET		offsetof(struct sk_buff, __cloned_offset)

	/* private: */
	__u8			__cloned_offset[0];
@@ -811,25 +811,15 @@ struct sk_buff {
	__u8			active_extensions;
#endif

-	/* fields enclosed in headers_start/headers_end are copied
+	/* Fields enclosed in headers group are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
-	/* private: */
-	__u32			headers_start[0];
-	/* public: */
-
-/* if you move pkt_type around you also must adapt those constants */
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_TYPE_MAX	(7 << 5)
-#else
-#define PKT_TYPE_MAX	7
-#endif
-#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)
+	struct_group(headers,

	/* private: */
	__u8			__pkt_type_offset[0];
	/* public: */
-	__u8			pkt_type:3;
+	__u8			pkt_type:3; /* see PKT_TYPE_MAX */
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
@@ -845,16 +835,10 @@ struct sk_buff {
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_VLAN_PRESENT_BIT	7
-#else
-#define PKT_VLAN_PRESENT_BIT	0
-#endif
-#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	/* private: */
	__u8			__pkt_vlan_present_offset[0];
	/* public: */
-	__u8			vlan_present:1;
+	__u8			vlan_present:1;	/* See PKT_VLAN_PRESENT_BIT */
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
@@ -935,9 +919,7 @@ struct sk_buff {
	u64			kcov_handle;
#endif

-	/* private: */
-	__u32			headers_end[0];
-	/* public: */
+	); /* end headers group */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
@@ -953,6 +935,22 @@ struct sk_buff {
#endif
};

+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX	(7 << 5)
+#else
+#define PKT_TYPE_MAX	7
+#endif
+#define PKT_TYPE_OFFSET		offsetof(struct sk_buff, __pkt_type_offset)
+
+/* if you move pkt_vlan_present around you also must adapt these constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT	7
+#else
+#define PKT_VLAN_PRESENT_BIT	0
+#endif
+#define PKT_VLAN_PRESENT_OFFSET	offsetof(struct sk_buff, __pkt_vlan_present_offset)
+
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
+5 −5
@@ -301,7 +301,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
		break;

	case SKF_AD_PKTTYPE:
-		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
+		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET);
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
@@ -323,7 +323,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
-		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
+		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET);
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
@@ -8029,7 +8029,7 @@ static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
-	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
+	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET);
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

@@ -8617,7 +8617,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
	case offsetof(struct __sk_buff, pkt_type):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
-				      PKT_TYPE_OFFSET());
+				      PKT_TYPE_OFFSET);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
@@ -8642,7 +8642,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
	case offsetof(struct __sk_buff, vlan_present):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
-				      PKT_VLAN_PRESENT_OFFSET());
+				      PKT_VLAN_PRESENT_OFFSET);
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
+5 −9
@@ -992,12 +992,10 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
}
EXPORT_SYMBOL(napi_consume_skb);

-/* Make sure a field is enclosed inside headers_start/headers_end section */
+/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
-	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
-		     offsetof(struct sk_buff, headers_start));	\
-	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
-		     offsetof(struct sk_buff, headers_end));	\
+	BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\
+		     offsetof(struct sk_buff, headers.field));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
@@ -1009,14 +1007,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

-	/* Note : this field could be in headers_start/headers_end section
+	/* Note : this field could be in the headers group.
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

-	memcpy(&new->headers_start, &old->headers_start,
-	       offsetof(struct sk_buff, headers_end) -
-	       offsetof(struct sk_buff, headers_start));
+	memcpy(&new->headers, &old->headers, sizeof(new->headers));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);