Commit faef26fa, authored by Daniel Borkmann, committed by Alexei Starovoitov
Browse files

bpf, selftests: Use bpf_tail_call_static where appropriate



For those locations where we use an immediate tail call map index use the
newly added bpf_tail_call_static() helper.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/3cfb2b799a62d22c6e7ae5897c23940bdcc24cbc.1601477936.git.daniel@iogearbox.net
parent 0e9f6841
Loading
Loading
Loading
Loading
+11 −9
Original line number Diff line number Diff line
@@ -31,28 +31,30 @@ struct {
#define PARSE_IP 3
#define PARSE_IPV6 4

/* Protocol dispatch routine. It tail-calls next BPF program depending
 * on eth proto. Note, we could have used ...
 *
 *   bpf_tail_call(skb, &jmp_table, proto);
 *
 * ... but it would need large prog_array and cannot be optimised given
 * the map key is not static.
 */
static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
{
	switch (proto) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
		/* Index is a compile-time constant, so the static variant
		 * lets the verifier/JIT patch the call site directly.
		 */
		bpf_tail_call_static(skb, &jmp_table, PARSE_VLAN);
		break;
	case ETH_P_MPLS_UC:
	case ETH_P_MPLS_MC:
		bpf_tail_call_static(skb, &jmp_table, PARSE_MPLS);
		break;
	case ETH_P_IP:
		bpf_tail_call_static(skb, &jmp_table, PARSE_IP);
		break;
	case ETH_P_IPV6:
		bpf_tail_call_static(skb, &jmp_table, PARSE_IPV6);
		break;
	}
	/* Unknown protocols fall through; no tail call is taken. */
}
+6 −6
Original line number Diff line number Diff line
@@ -118,18 +118,18 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)

	switch (proto) {
	case bpf_htons(ETH_P_IP):
		bpf_tail_call(skb, &jmp_table, IP);
		bpf_tail_call_static(skb, &jmp_table, IP);
		break;
	case bpf_htons(ETH_P_IPV6):
		bpf_tail_call(skb, &jmp_table, IPV6);
		bpf_tail_call_static(skb, &jmp_table, IPV6);
		break;
	case bpf_htons(ETH_P_MPLS_MC):
	case bpf_htons(ETH_P_MPLS_UC):
		bpf_tail_call(skb, &jmp_table, MPLS);
		bpf_tail_call_static(skb, &jmp_table, MPLS);
		break;
	case bpf_htons(ETH_P_8021Q):
	case bpf_htons(ETH_P_8021AD):
		bpf_tail_call(skb, &jmp_table, VLAN);
		bpf_tail_call_static(skb, &jmp_table, VLAN);
		break;
	default:
		/* Protocol not supported */
@@ -246,10 +246,10 @@ static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
	switch (nexthdr) {
	case IPPROTO_HOPOPTS:
	case IPPROTO_DSTOPTS:
		bpf_tail_call(skb, &jmp_table, IPV6OP);
		bpf_tail_call_static(skb, &jmp_table, IPV6OP);
		break;
	case IPPROTO_FRAGMENT:
		bpf_tail_call(skb, &jmp_table, IPV6FR);
		bpf_tail_call_static(skb, &jmp_table, IPV6FR);
		break;
	default:
		return parse_ip_proto(skb, nexthdr);
+14 −14
Original line number Diff line number Diff line
@@ -26,20 +26,20 @@ int entry(struct __sk_buff *skb)
	/* Multiple locations to make sure we patch
	 * all of them.
	 */
	bpf_tail_call(skb, &jmp_table, 0);
	bpf_tail_call(skb, &jmp_table, 0);
	bpf_tail_call(skb, &jmp_table, 0);
	bpf_tail_call(skb, &jmp_table, 0);

	bpf_tail_call(skb, &jmp_table, 1);
	bpf_tail_call(skb, &jmp_table, 1);
	bpf_tail_call(skb, &jmp_table, 1);
	bpf_tail_call(skb, &jmp_table, 1);

	bpf_tail_call(skb, &jmp_table, 2);
	bpf_tail_call(skb, &jmp_table, 2);
	bpf_tail_call(skb, &jmp_table, 2);
	bpf_tail_call(skb, &jmp_table, 2);
	bpf_tail_call_static(skb, &jmp_table, 0);
	bpf_tail_call_static(skb, &jmp_table, 0);
	bpf_tail_call_static(skb, &jmp_table, 0);
	bpf_tail_call_static(skb, &jmp_table, 0);

	bpf_tail_call_static(skb, &jmp_table, 1);
	bpf_tail_call_static(skb, &jmp_table, 1);
	bpf_tail_call_static(skb, &jmp_table, 1);
	bpf_tail_call_static(skb, &jmp_table, 1);

	bpf_tail_call_static(skb, &jmp_table, 2);
	bpf_tail_call_static(skb, &jmp_table, 2);
	bpf_tail_call_static(skb, &jmp_table, 2);
	bpf_tail_call_static(skb, &jmp_table, 2);

	return 3;
}
+7 −7
Original line number Diff line number Diff line
@@ -13,14 +13,14 @@ struct {
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
{
	/* Chain to slot 1; returns 0 only if the tail call misses. */
	bpf_tail_call_static(skb, &jmp_table, 1);
	return 0;
}

SEC("classifier/1")
int bpf_func_1(struct __sk_buff *skb)
{
	/* Chain to slot 2; returns 1 only if the tail call misses. */
	bpf_tail_call_static(skb, &jmp_table, 2);
	return 1;
}

@@ -33,25 +33,25 @@ int bpf_func_2(struct __sk_buff *skb)
SEC("classifier/3")
int bpf_func_3(struct __sk_buff *skb)
{
	/* Chain to slot 4; returns 3 only if the tail call misses. */
	bpf_tail_call_static(skb, &jmp_table, 4);
	return 3;
}

SEC("classifier/4")
int bpf_func_4(struct __sk_buff *skb)
{
	/* Chain back to slot 3 (3 <-> 4 ping-pong exercises the
	 * tail-call limit); returns 4 only if the tail call misses.
	 */
	bpf_tail_call_static(skb, &jmp_table, 3);
	return 4;
}

SEC("classifier")
int entry(struct __sk_buff *skb)
{
	bpf_tail_call_static(skb, &jmp_table, 0);
	/* Check multi-prog update. */
	bpf_tail_call_static(skb, &jmp_table, 2);
	/* Check tail call limit. */
	bpf_tail_call_static(skb, &jmp_table, 3);
	return 3;
}

+2 −2
Original line number Diff line number Diff line
@@ -16,14 +16,14 @@ SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
{
	/* Bump the invocation counter, then re-enter slot 0 (self);
	 * returns 1 only once the tail call misses or the limit hits.
	 */
	count++;
	bpf_tail_call_static(skb, &jmp_table, 0);
	return 1;
}

SEC("classifier")
int entry(struct __sk_buff *skb)
{
	/* Kick off the chain at slot 0; returns 0 if the slot is empty. */
	bpf_tail_call_static(skb, &jmp_table, 0);
	return 0;
}

Loading