Commit 6e945d57 authored by Jörn-Thorben Hinz, committed by Alexei Starovoitov
Browse files

selftests/bpf: Test a BPF CC writing sk_pacing_*



Test whether a TCP CC implemented in BPF is allowed to write
sk_pacing_rate and sk_pacing_status in struct sock. This is needed when
cong_control() is implemented and used.

Signed-off-by: Jörn-Thorben Hinz <jthinz@mailbox.tu-berlin.de>
Link: https://lore.kernel.org/r/20220622191227.898118-4-jthinz@mailbox.tu-berlin.de


Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 9f0265e9
Loading
Loading
Loading
Loading
+19 −0
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
#include "bpf_cubic.skel.h"
#include "bpf_tcp_nogpl.skel.h"
#include "bpf_dctcp_release.skel.h"
#include "tcp_ca_write_sk_pacing.skel.h"

#ifndef ENOTSUPP
#define ENOTSUPP 524
@@ -322,6 +323,22 @@ static void test_rel_setsockopt(void)
	bpf_dctcp_release__destroy(rel_skel);
}

/* Check that a BPF congestion control that writes sk_pacing_rate and
 * sk_pacing_status loads and attaches successfully: simply open/load the
 * skeleton and register its struct_ops map.
 */
static void test_write_sk_pacing(void)
{
	struct tcp_ca_write_sk_pacing *ca_skel;
	struct bpf_link *ops_link;

	ca_skel = tcp_ca_write_sk_pacing__open_and_load();
	if (!ASSERT_OK_PTR(ca_skel, "open_and_load"))
		return;

	/* Attaching the struct_ops map is what exercises the verifier's
	 * write checks on sk_pacing_*; success is the whole test.
	 */
	ops_link = bpf_map__attach_struct_ops(ca_skel->maps.write_sk_pacing);
	ASSERT_OK_PTR(ops_link, "attach_struct_ops");

	bpf_link__destroy(ops_link);
	tcp_ca_write_sk_pacing__destroy(ca_skel);
}

void test_bpf_tcp_ca(void)
{
	if (test__start_subtest("dctcp"))
@@ -334,4 +351,6 @@ void test_bpf_tcp_ca(void)
		test_dctcp_fallback();
	if (test__start_subtest("rel_setsockopt"))
		test_rel_setsockopt();
	if (test__start_subtest("write_sk_pacing"))
		test_write_sk_pacing();
}
+60 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0

#include "vmlinux.h"

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

#define USEC_PER_SEC 1000000UL

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Reinterpret a struct sock pointer as the containing tcp_sock,
 * mirroring the kernel's tcp_sk() accessor.
 */
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	struct tcp_sock *tp = (struct tcp_sock *)sk;

	return tp;
}

SEC("struct_ops/write_sk_pacing_init")
void BPF_PROG(write_sk_pacing_init, struct sock *sk)
{
	/* Request kernel-driven pacing for this socket.  With atomics tests
	 * enabled, use a compare-and-swap so the status only changes if it
	 * is still SK_PACING_NONE (i.e. do not clobber SK_PACING_FQ set by
	 * the fq qdisc); otherwise fall back to a plain store, which the
	 * verifier must also permit.
	 */
#ifdef ENABLE_ATOMICS_TESTS
	__sync_bool_compare_and_swap(&sk->sk_pacing_status, SK_PACING_NONE,
				     SK_PACING_NEEDED);
#else
	sk->sk_pacing_status = SK_PACING_NEEDED;
#endif
}

SEC("struct_ops/write_sk_pacing_cong_control")
void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
	      const struct rate_sample *rs)
{
	/* Set a pacing rate of one cwnd worth of data per smoothed RTT,
	 * i.e. rate = cwnd * mss / srtt, in bytes per second.
	 * srtt_us is stored left-shifted by 3 (units of 1/8 us — kernel
	 * convention; TODO confirm against tcp_sock definition), hence the
	 * matching << 3 on the numerator.  The ?: fallback avoids a divide
	 * by zero before the first RTT sample by substituting 1 us (1 << 3
	 * in srtt_us units).
	 */
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned long rate =
		((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
		(tp->srtt_us ?: 1U << 3);
	/* The write to sk_pacing_rate (clamped to the socket's configured
	 * maximum) is the access this selftest exercises.
	 */
	sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
}

/* Minimal .ssthresh implementation: keep the current threshold unchanged. */
SEC("struct_ops/write_sk_pacing_ssthresh")
__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->snd_ssthresh;
}

/* Minimal .undo_cwnd implementation: keep the current cwnd unchanged. */
SEC("struct_ops/write_sk_pacing_undo_cwnd")
__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->snd_cwnd;
}

/* Register the CC with the kernel via a struct_ops map.  Only init and
 * cong_control matter for this test; ssthresh and undo_cwnd are stubs
 * required for a valid tcp_congestion_ops.  The name must fit the
 * kernel's CA name limit (presumably TCP_CA_NAME_MAX — confirm).
 */
SEC(".struct_ops")
struct tcp_congestion_ops write_sk_pacing = {
	.init = (void *)write_sk_pacing_init,
	.cong_control = (void *)write_sk_pacing_cong_control,
	.ssthresh = (void *)write_sk_pacing_ssthresh,
	.undo_cwnd = (void *)write_sk_pacing_undo_cwnd,
	.name = "bpf_w_sk_pacing",
};