Commit db473df2 authored by Alexei Starovoitov's avatar Alexei Starovoitov
Browse files

Merge branch 'selftests/xsk: speed-ups, fixes, and new XDP programs'



Magnus Karlsson says:

====================

This is a patch set of various performance improvements, fixes, and
the introduction of more than one XDP program to the xsk selftests
framework so we can test more things in the future such as upcoming
multi-buffer and metadata support for AF_XDP. The new programs just
reuse the framework that all the other eBPF selftests use. The new
feature is used to implement one new test that does XDP_DROP on every
other packet. More tests using this will be added in future commits.

Contents:

* The run-time of the test suite is cut by 10x when executing the
  tests on a real NIC, by only attaching the XDP program once per mode
  tested, instead of once per test program.

* Over 700 lines of code have been removed. The xsk.c control file was
  moved straight over from libbpf when the xsk support was deprecated
  there. As it is now not used as library code that has to work with
  all kinds of versions of Linux, a lot of code could be dropped or
  simplified.

* Add a new command line option "-d" that can be used when a test
  fails and you want to debug it with gdb or some other debugger. The
  option creates the two veth netdevs and prints them to the screen
  without deleting them afterwards. This way these veth netdevs can be
  used when running xskxceiver in a debugger.

* Implemented the possibility to load external XDP programs so we can
  have more than the default one. This feature is used to implement a
  test where every other packet is dropped. Good exercise for the
  recycling mechanism of the xsk buffer pool used in zero-copy mode.

* Various clean-ups and small fixes in patches 1 to 5. None of these
  fixes has any impact on the correct execution of the tests when they
  pass, though they can be irritating when a test fails. IMHO, they do
  not need to go to bpf as they will not fix anything there. The first
  version of patches 1, 2, and 4 were previously sent to bpf, but have
  now been included here.

v2 -> v3:
* Fixed compilation error for llvm [David]
* Made the function xsk_is_in_drv_mode(ifobj) more generic by changing
  it to xsk_is_in_mode(ifobj, xdp_mode) [Maciej]
* Added Maciej's acks to all the patches

v1 -> v2:
* Fixed spelling error in commit message of patch #6 [Björn]
* Added explanation on why it is safe to use C11 atomics in patch #7
  [Daniel]
* Put all XDP programs in the same file so that adding more XDP
  programs to xskxceiver.c becomes more scalable in patches #11 and
  #12 [Maciej]
* Removed more dead code in patch #8 [Maciej]
* Removed stale %s specifier in error print, patch #9 [Maciej]
* Changed name of XDP_CONSUMES_SOME_PACKETS to XDP_DROP_HALF to
  hopefully make it clearer [Maciej]
* ifobj_rx and ifobj_tx name changes in patch #13 [Maciej]
* Simplified XDP attachment code in patch #15 [Maciej]

Patches:
1-5:   Small fixes and clean-ups
6:     New convenient debug option when using a debugger such as gdb
7-8:   Removal of unnecessary code
9:     Add the ability to load external XDP programs
10-11: Removal of more unnecessary code
12:    Implement a new test where every other packet is dropped (XDP_DROP)
13:    Unify the thread dispatching code
14-15: Simplify the way tests are written when using custom packet_streams
       or custom XDP programs

Thanks: Magnus
====================

Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents 5fbea423 7d8319a7
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -240,7 +240,6 @@ $(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
$(OUTPUT)/test_maps: $(TESTING_HELPERS)
$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
$(OUTPUT)/xsk.o: $(BPFOBJ)
$(OUTPUT)/xskxceiver: $(OUTPUT)/xsk.o

BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile)    \
@@ -383,6 +382,7 @@ linked_maps.skel.h-deps := linked_maps1.bpf.o linked_maps2.bpf.o
test_subskeleton.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton_lib.bpf.o test_subskeleton.bpf.o
test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.bpf.o test_subskeleton_lib.bpf.o
test_usdt.skel.h-deps := test_usdt.bpf.o test_usdt_multispec.bpf.o
xsk_xdp_progs.skel.h-deps := xsk_xdp_progs.bpf.o

LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))

@@ -576,6 +576,10 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
	$(call msg,BINARY,,$@)
	$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@

$(OUTPUT)/xskxceiver: xskxceiver.c $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT)
	$(call msg,BINARY,,$@)
	$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@

# Make sure we are able to include and link libbpf against c++.
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
	$(call msg,CXX,,$@)
+30 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Intel */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* One-entry XSKMAP: slot 0 holds the fd of the AF_XDP socket that
 * packets are redirected to by the programs below.
 */
struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} xsk SEC(".maps");

/* Packet counter used by xsk_xdp_drop to drop every other packet.
 * NOTE(review): plain non-atomic static — assumes packets for this
 * program arrive on a single queue/CPU; confirm for multi-queue use.
 */
static unsigned int idx;

/* Default test program: redirect every packet to the socket in slot 0
 * of the xsk map. The third argument (XDP_DROP) is the action returned
 * when the map lookup fails, so packets are dropped if no socket is
 * bound to the slot.
 */
SEC("xdp") int xsk_def_prog(struct xdp_md *xdp)
{
	return bpf_redirect_map(&xsk, 0, XDP_DROP);
}

/* Drop every second packet; redirect the rest to the socket in slot 0
 * of the xsk map (XDP_DROP doubles as the on-lookup-failure action).
 */
SEC("xdp") int xsk_xdp_drop(struct xdp_md *xdp)
{
	unsigned int count = idx++;

	return (count % 2) ? XDP_DROP : bpf_redirect_map(&xsk, 0, XDP_DROP);
}

char _license[] SEC("license") = "GPL";
+21 −21
Original line number Diff line number Diff line
@@ -24,8 +24,6 @@
#      -----------     |     ----------
#      |  vethX  | --------- |  vethY |
#      -----------   peer    ----------
#           |          |          |
#      namespaceX      |     namespaceY
#
# AF_XDP is an address family optimized for high performance packet processing,
# it is XDP’s user-space interface.
@@ -39,10 +37,9 @@
# Prerequisites setup by script:
#
#   Set up veth interfaces as per the topology shown ^^:
#   * setup two veth interfaces and one namespace
#   ** veth<xxxx> in root namespace
#   ** veth<yyyy> in af_xdp<xxxx> namespace
#   ** namespace af_xdp<xxxx>
#   * setup two veth interfaces
#   ** veth<xxxx>
#   ** veth<yyyy>
#   *** xxxx and yyyy are randomly generated 4 digit numbers used to avoid
#       conflict with any existing interface
#   * tests the veth and xsk layers of the topology
@@ -74,6 +71,9 @@
# Run and dump packet contents:
#   sudo ./test_xsk.sh -D
#
# Set up veth interfaces and leave them up so xskxceiver can be launched in a debugger:
#   sudo ./test_xsk.sh -d
#
# Run test suite for physical device in loopback mode
#   sudo ./test_xsk.sh -i IFACE

@@ -81,11 +81,12 @@

ETH=""

while getopts "vDi:" flag
while getopts "vDi:d" flag
do
	case "${flag}" in
		v) verbose=1;;
		D) dump_pkts=1;;
		d) debug=1;;
		i) ETH=${OPTARG};;
	esac
done
@@ -99,28 +100,25 @@ VETH0_POSTFIX=$(cat ${URANDOM} | tr -dc '0-9' | fold -w 256 | head -n 1 | head -
VETH0=ve${VETH0_POSTFIX}
VETH1_POSTFIX=$(cat ${URANDOM} | tr -dc '0-9' | fold -w 256 | head -n 1 | head --bytes 4)
VETH1=ve${VETH1_POSTFIX}
NS0=root
NS1=af_xdp${VETH1_POSTFIX}
MTU=1500

trap ctrl_c INT

function ctrl_c() {
        cleanup_exit ${VETH0} ${VETH1} ${NS1}
        cleanup_exit ${VETH0} ${VETH1}
	exit 1
}

setup_vethPairs() {
	if [[ $verbose -eq 1 ]]; then
	        echo "setting up ${VETH0}: namespace: ${NS0}"
	        echo "setting up ${VETH0}"
	fi
	ip netns add ${NS1}
	ip link add ${VETH0} numtxqueues 4 numrxqueues 4 type veth peer name ${VETH1} numtxqueues 4 numrxqueues 4
	if [ -f /proc/net/if_inet6 ]; then
		echo 1 > /proc/sys/net/ipv6/conf/${VETH0}/disable_ipv6
	fi
	if [[ $verbose -eq 1 ]]; then
	        echo "setting up ${VETH1}: namespace: ${NS1}"
	        echo "setting up ${VETH1}"
	fi

	if [[ $busy_poll -eq 1 ]]; then
@@ -130,18 +128,15 @@ setup_vethPairs() {
		echo 200000 > /sys/class/net/${VETH1}/gro_flush_timeout
	fi

	ip link set ${VETH1} netns ${NS1}
	ip netns exec ${NS1} ip link set ${VETH1} mtu ${MTU}
	ip link set ${VETH1} mtu ${MTU}
	ip link set ${VETH0} mtu ${MTU}
	ip netns exec ${NS1} ip link set ${VETH1} up
	ip netns exec ${NS1} ip link set dev lo up
	ip link set ${VETH1} up
	ip link set ${VETH0} up
}

if [ ! -z $ETH ]; then
	VETH0=${ETH}
	VETH1=${ETH}
	NS1=""
else
	validate_root_exec
	validate_veth_support ${VETH0}
@@ -151,7 +146,7 @@ else
	retval=$?
	if [ $retval -ne 0 ]; then
		test_status $retval "${TEST_NAME}"
		cleanup_exit ${VETH0} ${VETH1} ${NS1}
		cleanup_exit ${VETH0} ${VETH1}
		exit $retval
	fi
fi
@@ -174,10 +169,15 @@ statusList=()

TEST_NAME="XSK_SELFTESTS_${VETH0}_SOFTIRQ"

if [[ $debug -eq 1 ]]; then
    echo "-i" ${VETH0} "-i" ${VETH1}
    exit
fi

exec_xskxceiver

if [ -z $ETH ]; then
	cleanup_exit ${VETH0} ${VETH1} ${NS1}
	cleanup_exit ${VETH0} ${VETH1}
fi
TEST_NAME="XSK_SELFTESTS_${VETH0}_BUSY_POLL"
busy_poll=1
@@ -190,7 +190,7 @@ exec_xskxceiver
## END TESTS

if [ -z $ETH ]; then
	cleanup_exit ${VETH0} ${VETH1} ${NS1}
	cleanup_exit ${VETH0} ${VETH1}
fi

failures=0
+36 −641
Original line number Diff line number Diff line
@@ -49,10 +49,7 @@

#define pr_warn(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

enum xsk_prog {
	XSK_PROG_FALLBACK,
	XSK_PROG_REDIRECT_FLAGS,
};
#define XSKMAP_SIZE 1

struct xsk_umem {
	struct xsk_ring_prod *fill_save;
@@ -74,43 +71,16 @@ struct xsk_ctx {
	int refcount;
	int ifindex;
	struct list_head list;
	int prog_fd;
	int link_fd;
	int xsks_map_fd;
	char ifname[IFNAMSIZ];
	bool has_bpf_link;
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_ctx *ctx;
	struct xsk_socket_config config;
	int fd;
};

struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};

/* Up until and including Linux 5.3 */
struct xdp_ring_offset_v1 {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
};

/* Up until and including Linux 5.3 */
struct xdp_mmap_offsets_v1 {
	struct xdp_ring_offset_v1 rx;
	struct xdp_ring_offset_v1 tx;
	struct xdp_ring_offset_v1 fr;
	struct xdp_ring_offset_v1 cr;
};

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
@@ -153,55 +123,17 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return 0;
	}

	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}

/* Convert ring offsets filled in by a <= 5.3 kernel (struct
 * xdp_mmap_offsets_v1, which lacks the flags fields) into the >= 5.4
 * layout, rewriting @off in place.
 */
static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
{
	struct xdp_mmap_offsets_v1 off_v1;

	/* getsockopt on a kernel <= 5.3 has no flags fields.
	 * Copy over the offsets to the correct places in the >=5.4 format
	 * and put the flags where they would have been on that kernel.
	 */
	memcpy(&off_v1, off, sizeof(off_v1));

	off->rx.producer = off_v1.rx.producer;
	off->rx.consumer = off_v1.rx.consumer;
	off->rx.desc = off_v1.rx.desc;
	off->rx.flags = off_v1.rx.consumer + sizeof(__u32);

	off->tx.producer = off_v1.tx.producer;
	off->tx.consumer = off_v1.tx.consumer;
	off->tx.desc = off_v1.tx.desc;
	off->tx.flags = off_v1.tx.consumer + sizeof(__u32);

	off->fr.producer = off_v1.fr.producer;
	off->fr.consumer = off_v1.fr.consumer;
	off->fr.desc = off_v1.fr.desc;
	off->fr.flags = off_v1.fr.consumer + sizeof(__u32);

	off->cr.producer = off_v1.cr.producer;
	off->cr.consumer = off_v1.cr.consumer;
	off->cr.desc = off_v1.cr.desc;
	off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
}

static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen;
@@ -215,11 +147,6 @@ static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
	if (optlen == sizeof(*off))
		return 0;

	if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
		xsk_mmap_offsets_v1(off);
		return 0;
	}

	return -EINVAL;
}

@@ -340,531 +267,56 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area,
	return err;
}

struct xsk_umem_config_v1 {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
};

/* Probe whether the running kernel honors a default action passed in
 * the flags argument of bpf_redirect_map() (supported from kernel 5.3
 * onwards — see the fallback program comment in xsk_load_xdp_prog()).
 * Loads a minimal XDP program against a one-entry XSKMAP and test-runs
 * it: if the program returns XDP_PASS, the default action was honored
 * and XSK_PROG_REDIRECT_FLAGS is returned. Any failure along the way
 * falls back to XSK_PROG_FALLBACK.
 */
static enum xsk_prog get_xsk_prog(void)
{
	enum xsk_prog detected = XSK_PROG_FALLBACK;
	char data_in = 0, data_out;
	struct bpf_insn insns[] = {
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_MOV64_IMM(BPF_REG_3, XDP_PASS),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &data_in,
		.data_size_in = 1,
		.data_out = &data_out,
	);

	int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns);

	map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL);
	if (map_fd < 0)
		return detected;

	/* Patch the real map fd into the BPF_LD_MAP_FD placeholder. */
	insns[0].imm = map_fd;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog_fd < 0) {
		close(map_fd);
		return detected;
	}

	/* Map entry 0 is empty, so XDP_PASS can only come back if the
	 * kernel applied the default action from the flags argument.
	 */
	ret = bpf_prog_test_run_opts(prog_fd, &opts);
	if (!ret && opts.retval == XDP_PASS)
		detected = XSK_PROG_REDIRECT_FLAGS;
	close(prog_fd);
	close(map_fd);
	return detected;
}

/* Load one of two built-in XDP redirect programs, chosen by probing
 * the kernel with get_xsk_prog(): the post-5.3 single-call variant
 * when bpf_redirect_map() honors a default action in its flags
 * argument, otherwise the pre-5.3 fallback with an explicit map
 * lookup. On success the program fd is stored in ctx->prog_fd and 0
 * is returned; on failure the verifier log is printed and the
 * negative fd is returned.
 */
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	struct xsk_ctx *ctx = xsk->ctx;
	char log_buf[log_buf_size];
	int prog_fd;

	/* This is the fallback C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     int ret, index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
	 *     ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
	 *     if (ret > 0)
	 *         return ret;
	 *
	 *     // Fallback for pre-5.3 kernels, not supporting default
	 *     // action in the flags parameter.
	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *     return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r2 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* if w0 != 0 goto pc+13 */
		BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
		/* r2 = r10 */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		/* r2 += -4 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* call bpf_map_lookup_elem */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		/* r1 = r0 */
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		/* r0 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_0, 2),
		/* if r1 == 0 goto pc+5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = 0 */
		BPF_MOV64_IMM(BPF_REG_3, 0),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};

	/* This is the post-5.3 kernel C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
	 * }
	 */
	struct bpf_insn prog_redirect_flags[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt[] = {ARRAY_SIZE(prog),
			      ARRAY_SIZE(prog_redirect_flags),
	};
	struct bpf_insn *progs[] = {prog, prog_redirect_flags};
	enum xsk_prog option = get_xsk_prog();
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_buf_size,
	);

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "LGPL-2.1 or BSD-2-Clause",
				progs[option], insns_cnt[option], &opts);
	if (prog_fd < 0) {
		pr_warn("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	ctx->prog_fd = prog_fd;
	return 0;
}

/* Attach the already-loaded XDP program (ctx->prog_fd) to the
 * interface via a bpf_link. Refuses to proceed (-EINVAL) when a
 * netlink-attached XDP program is already present on the interface,
 * since the user must remove that one first. On success the link fd
 * is stored in ctx->link_fd and 0 is returned.
 */
static int xsk_create_bpf_link(struct xsk_socket *xsk)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
	struct xsk_ctx *ctx = xsk->ctx;
	__u32 prog_id = 0;
	int link_fd;
	int err;

	err = bpf_xdp_query_id(ctx->ifindex, xsk->config.xdp_flags, &prog_id);
	if (err) {
		pr_warn("getting XDP prog id failed\n");
		return err;
	}

	/* if there's a netlink-based XDP prog loaded on interface, bail out
	 * and ask user to do the removal by himself
	 */
	if (prog_id) {
		pr_warn("Netlink-based XDP prog detected, please unload it in order to launch AF_XDP prog\n");
		return -EINVAL;
	}

	/* These two flags are netlink-attach concepts; strip them before
	 * passing the remaining mode flags to bpf_link_create().
	 */
	opts.flags = xsk->config.xdp_flags & ~(XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_REPLACE);

	link_fd = bpf_link_create(ctx->prog_fd, ctx->ifindex, BPF_XDP, &opts);
	if (link_fd < 0) {
		pr_warn("bpf_link_create failed: %s\n", strerror(errno));
		return link_fd;
	}

	ctx->link_fd = link_fd;
	return 0;
}

/* Query the interface's channel counts via the ETHTOOL_GCHANNELS
 * ioctl and return the maximum queue count (max of rx, tx and
 * combined). Returns 1 when the driver does not implement the ioctl
 * (EOPNOTSUPP), or a negative errno on other failures.
 */
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	struct xsk_ctx *ctx = xsk->ctx;
	struct ifreq ifr = {};
	int fd, err, ret;

	fd = socket(AF_LOCAL, SOCK_DGRAM | SOCK_CLOEXEC, 0);
	if (fd < 0)
		return -errno;

	ifr.ifr_data = (void *)&channels;
	bpf_strlcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ);
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

	if (err) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	} else {
		/* Take the max of rx, tx, combined. Drivers return
		 * the number of channels in different ways.
		 */
		ret = max(channels.max_rx, channels.max_tx);
		ret = max(ret, (int)channels.max_combined);
	}

out:
	close(fd);
	return ret;
}

/* Create the "xsks_map" XSKMAP sized to the interface's maximum queue
 * count and store its fd in ctx->xsks_map_fd. Returns 0 on success,
 * or the negative error from the queue query / map creation.
 */
static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

	fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map",
			    sizeof(int), sizeof(int), max_queues, NULL);
	if (fd < 0)
		return fd;

	ctx->xsks_map_fd = fd;

	return 0;
}

/* Remove this socket's queue entry from the xsks_map and close the
 * map fd.
 */
static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
	close(ctx->xsks_map_fd);
}

/* Find the map named "xsks_map" among the maps referenced by the
 * already-attached XDP program (ctx->prog_fd) and store its fd in
 * ctx->xsks_map_fd. Returns 0 on success, -ENOENT when no map with
 * that name exists, -ENOMEM on allocation failure, or another
 * negative error from the bpf info queries.
 */
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct xsk_ctx *ctx = xsk->ctx;
	struct bpf_map_info map_info;
	int fd, err;

	/* First query only learns how many maps the program uses. */
	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	/* Second query fetches the map ids into the allocated array. */
	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	ctx->xsks_map_fd = -1;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;

		memset(&map_info, 0, map_len);
		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err) {
			close(fd);
			continue;
		}

		if (!strncmp(map_info.name, "xsks_map", sizeof(map_info.name))) {
			ctx->xsks_map_fd = fd;
			break;
		}

		close(fd);
	}

	if (ctx->xsks_map_fd == -1)
		err = -ENOENT;

out_map_ids:
	free(map_ids);
	return err;
}

/* Insert this socket's fd into the xsks_map at its queue_id, so the
 * XDP program redirects that queue's traffic to this socket.
 */
static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
				   &xsk->fd, 0);
}

static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)
bool xsk_is_in_mode(u32 ifindex, int mode)
{
	struct bpf_link_info link_info;
	__u32 link_len;
	__u32 id = 0;
	int err;
	int fd;
	LIBBPF_OPTS(bpf_xdp_query_opts, opts);
	int ret;

	while (true) {
		err = bpf_link_get_next_id(id, &id);
		if (err) {
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_warn("can't get next link: %s\n", strerror(errno));
			break;
	ret = bpf_xdp_query(ifindex, mode, &opts);
	if (ret) {
		printf("XDP mode query returned error %s\n", strerror(errno));
		return false;
	}

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;
			pr_warn("can't get link by id (%u): %s\n", id, strerror(errno));
			err = -errno;
			break;
		}

		link_len = sizeof(struct bpf_link_info);
		memset(&link_info, 0, link_len);
		err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len);
		if (err) {
			pr_warn("can't get link info: %s\n", strerror(errno));
			close(fd);
			break;
		}
		if (link_info.type == BPF_LINK_TYPE_XDP) {
			if (link_info.xdp.ifindex == ifindex) {
				*link_fd = fd;
				if (prog_id)
					*prog_id = link_info.prog_id;
				break;
			}
		}
		close(fd);
	}

	return err;
}

static bool xsk_probe_bpf_link(void)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = XDP_FLAGS_SKB_MODE);
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
		BPF_EXIT_INSN()
	};
	int prog_fd, link_fd = -1, insn_cnt = ARRAY_SIZE(insns);
	int ifindex_lo = 1;
	bool ret = false;
	int err;

	err = xsk_link_lookup(ifindex_lo, NULL, &link_fd);
	if (err)
		return ret;

	if (link_fd >= 0)
		return true;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog_fd < 0)
		return ret;

	link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts);
	close(prog_fd);

	if (link_fd >= 0) {
		ret = true;
		close(link_fd);
	}
	if (mode == XDP_FLAGS_DRV_MODE)
		return opts.attach_mode == XDP_ATTACHED_DRV;
	else if (mode == XDP_FLAGS_SKB_MODE)
		return opts.attach_mode == XDP_ATTACHED_SKB;

	return ret;
	return false;
}

static int xsk_create_xsk_struct(int ifindex, struct xsk_socket *xsk)
int xsk_attach_xdp_program(struct bpf_program *prog, int ifindex, u32 xdp_flags)
{
	char ifname[IFNAMSIZ];
	struct xsk_ctx *ctx;
	char *interface;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return -ENOMEM;

	interface = if_indextoname(ifindex, &ifname[0]);
	if (!interface) {
		free(ctx);
		return -errno;
	}

	ctx->ifindex = ifindex;
	bpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);

	xsk->ctx = ctx;
	xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
	int prog_fd;

	return 0;
	prog_fd = bpf_program__fd(prog);
	return bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL);
}

static int xsk_init_xdp_res(struct xsk_socket *xsk,
			    int *xsks_map_fd)
void xsk_detach_xdp_program(int ifindex, u32 xdp_flags)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int err;

	err = xsk_create_bpf_maps(xsk);
	if (err)
		return err;

	err = xsk_load_xdp_prog(xsk);
	if (err)
		goto err_load_xdp_prog;

	if (ctx->has_bpf_link)
		err = xsk_create_bpf_link(xsk);
	else
		err = bpf_xdp_attach(xsk->ctx->ifindex, ctx->prog_fd,
				     xsk->config.xdp_flags, NULL);

	if (err)
		goto err_attach_xdp_prog;

	if (!xsk->rx)
		return err;

	err = xsk_set_bpf_maps(xsk);
	if (err)
		goto err_set_bpf_maps;

	return err;

err_set_bpf_maps:
	if (ctx->has_bpf_link)
		close(ctx->link_fd);
	else
		bpf_xdp_detach(ctx->ifindex, 0, NULL);
err_attach_xdp_prog:
	close(ctx->prog_fd);
err_load_xdp_prog:
	xsk_delete_bpf_maps(xsk);
	return err;
	bpf_xdp_detach(ifindex, xdp_flags, NULL);
}

static int xsk_lookup_xdp_res(struct xsk_socket *xsk, int *xsks_map_fd, int prog_id)
void xsk_clear_xskmap(struct bpf_map *map)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int err;

	ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (ctx->prog_fd < 0) {
		err = -errno;
		goto err_prog_fd;
	}
	err = xsk_lookup_bpf_maps(xsk);
	if (err)
		goto err_lookup_maps;
	u32 index = 0;
	int map_fd;

	if (!xsk->rx)
		return err;

	err = xsk_set_bpf_maps(xsk);
	if (err)
		goto err_set_maps;

	return err;

err_set_maps:
	close(ctx->xsks_map_fd);
err_lookup_maps:
	close(ctx->prog_fd);
err_prog_fd:
	if (ctx->has_bpf_link)
		close(ctx->link_fd);
	return err;
	map_fd = bpf_map__fd(map);
	bpf_map_delete_elem(map_fd, &index);
}

static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd)
int xsk_update_xskmap(struct bpf_map *map, struct xsk_socket *xsk)
{
	struct xsk_socket *xsk = _xdp;
	struct xsk_ctx *ctx = xsk->ctx;
	__u32 prog_id = 0;
	int err;

	if (ctx->has_bpf_link)
		err = xsk_link_lookup(ctx->ifindex, &prog_id, &ctx->link_fd);
	else
		err = bpf_xdp_query_id(ctx->ifindex, xsk->config.xdp_flags, &prog_id);

	if (err)
		return err;

	err = !prog_id ? xsk_init_xdp_res(xsk, xsks_map_fd) :
			 xsk_lookup_xdp_res(xsk, xsks_map_fd, prog_id);

	if (!err && xsks_map_fd)
		*xsks_map_fd = ctx->xsks_map_fd;
	int map_fd, sock_fd;
	u32 index = 0;

	return err;
}
	map_fd = bpf_map__fd(map);
	sock_fd = xsk_socket__fd(xsk);

int xsk_setup_xdp_prog_xsk(struct xsk_socket *xsk, int *xsks_map_fd)
{
	return __xsk_setup_xdp_prog(xsk, xsks_map_fd);
	return bpf_map_update_elem(map_fd, &index, &sock_fd, 0);
}

static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
@@ -913,7 +365,7 @@ static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)

static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      const char *ifname, __u32 queue_id,
				      __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
@@ -940,51 +392,15 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
	ctx->refcount = 1;
	ctx->umem = umem;
	ctx->queue_id = queue_id;
	bpf_strlcpy(ctx->ifname, ifname, IFNAMSIZ);

	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	ctx->has_bpf_link = xsk_probe_bpf_link();
	return ctx;
}

/* Free a temporary xsk_socket struct and its ctx (no fds are closed
 * here; callers handle those separately).
 */
static void xsk_destroy_xsk_struct(struct xsk_socket *xsk)
{
	free(xsk->ctx);
	free(xsk);
}

/* Point the socket at an externally supplied xsks_map fd and insert
 * the socket into that map at its queue_id.
 */
int xsk_socket__update_xskmap(struct xsk_socket *xsk, int fd)
{
	xsk->ctx->xsks_map_fd = fd;
	return xsk_set_bpf_maps(xsk);
}

/* Set up the built-in XDP program on @ifindex without creating an
 * AF_XDP socket: allocates a temporary xsk_socket struct, runs the
 * shared setup path, and frees the struct again. Optionally returns
 * the xsks_map fd via @xsks_map_fd. Returns 0 on success or a
 * negative error.
 */
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd)
{
	struct xsk_socket *xsk;
	int res;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	res = xsk_create_xsk_struct(ifindex, xsk);
	if (res) {
		free(xsk);
		return -EINVAL;
	}

	res = __xsk_setup_xdp_prog(xsk, xsks_map_fd);

	xsk_destroy_xsk_struct(xsk);

	return res;
}

int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      const char *ifname,
			      int ifindex,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
@@ -998,7 +414,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err, ifindex;
	int err;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;
@@ -1013,13 +429,6 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
	if (err)
		goto out_xsk_alloc;

	xsk->outstanding_tx = 0;
	ifindex = if_nametoindex(ifname);
	if (!ifindex) {
		err = -errno;
		goto out_xsk_alloc;
	}

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW | SOCK_CLOEXEC, 0);
		if (xsk->fd < 0) {
@@ -1039,8 +448,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
				     fill, comp);
		ctx = xsk_create_ctx(xsk, umem, ifindex, queue_id, fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
@@ -1138,12 +546,6 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
		goto out_mmap_tx;
	}

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = __xsk_setup_xdp_prog(xsk, NULL);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	umem->fill_save = NULL;
	umem->comp_save = NULL;
@@ -1167,7 +569,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
	return err;
}

int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
int xsk_socket__create(struct xsk_socket **xsk_ptr, int ifindex,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
@@ -1175,7 +577,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
	if (!umem)
		return -EFAULT;

	return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
	return xsk_socket__create_shared(xsk_ptr, ifindex, queue_id, umem,
					 rx, tx, umem->fill_save,
					 umem->comp_save, usr_config);
}
@@ -1219,13 +621,6 @@ void xsk_socket__delete(struct xsk_socket *xsk)
	ctx = xsk->ctx;
	umem = ctx->umem;

	if (ctx->refcount == 1) {
		xsk_delete_bpf_maps(xsk);
		close(ctx->prog_fd);
		if (ctx->has_bpf_link)
			close(ctx->link_fd);
	}

	xsk_put_ctx(ctx, true);

	err = xsk_get_mmap_offsets(xsk->fd, &off);
+11 −86

File changed.

Preview size limit exceeded, changes collapsed.

Loading