Commit 1372d34c authored by Toke Høiland-Jørgensen, committed by Alexei Starovoitov
Browse files

xdp: Add xdp_do_redirect_frame() for pre-computed xdp_frames



Add an xdp_do_redirect_frame() variant which supports pre-computed
xdp_frame structures. This will be used in bpf_prog_run() to avoid having
to write to the xdp_frame structure when the XDP program doesn't modify the
frame boundaries.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20220103150812.87914-6-toke@redhat.com
parent d53ad5d8
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -1019,6 +1019,10 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
int xdp_do_redirect_frame(struct net_device *dev,
			  struct xdp_buff *xdp,
			  struct xdp_frame *xdpf,
			  struct bpf_prog *prog);
void xdp_do_flush(void);

/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
+54 −11
Original line number Diff line number Diff line
@@ -3957,26 +3957,44 @@ u32 xdp_master_redirect(struct xdp_buff *xdp)
}
EXPORT_SYMBOL_GPL(xdp_master_redirect);

int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
					struct net_device *dev,
					struct xdp_buff *xdp,
					struct bpf_prog *xdp_prog)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	enum bpf_map_type map_type = ri->map_type;
	void *fwd = ri->tgt_value;
	u32 map_id = ri->map_id;
	struct xdp_frame *xdpf;
	struct bpf_map *map;
	int err;

	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
	ri->map_type = BPF_MAP_TYPE_UNSPEC;

	if (map_type == BPF_MAP_TYPE_XSKMAP) {
	err = __xsk_map_redirect(fwd, xdp);
		goto out;
	if (unlikely(err))
		goto err;

	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
	return err;
}

	xdpf = xdp_convert_buff_to_frame(xdp);
static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
						   struct net_device *dev,
						   struct xdp_frame *xdpf,
						   struct bpf_prog *xdp_prog)
{
	enum bpf_map_type map_type = ri->map_type;
	void *fwd = ri->tgt_value;
	u32 map_id = ri->map_id;
	struct bpf_map *map;
	int err;

	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
	ri->map_type = BPF_MAP_TYPE_UNSPEC;

	if (unlikely(!xdpf)) {
		err = -EOVERFLOW;
		goto err;
@@ -4013,7 +4031,6 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		err = -EBADRQC;
	}

out:
	if (unlikely(err))
		goto err;

@@ -4023,8 +4040,34 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
	return err;
}

/*
 * xdp_do_redirect - carry out the redirect recorded in this CPU's
 * bpf_redirect_info for an xdp_buff.
 *
 * Reads the per-CPU bpf_redirect_info to find the target map type. For
 * XSKMAP targets it dispatches to __xdp_do_redirect_xsk() with the raw
 * xdp_buff (the AF_XDP path consumes the buff directly); for all other
 * targets it first converts the buff to an xdp_frame via
 * xdp_convert_buff_to_frame() and hands that to __xdp_do_redirect_frame().
 *
 * NOTE(review): a NULL result from xdp_convert_buff_to_frame() is
 * presumably handled inside __xdp_do_redirect_frame() — its body (not
 * fully visible here) appears to check "if (unlikely(!xdpf))" and return
 * -EOVERFLOW; confirm against the full source.
 *
 * Returns 0 on success or a negative errno on failure (propagated from
 * the helper actually invoked).
 */
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		    struct bpf_prog *xdp_prog)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	enum bpf_map_type map_type = ri->map_type;

	/* AF_XDP sockets take the xdp_buff directly; no frame conversion. */
	if (map_type == BPF_MAP_TYPE_XSKMAP)
		return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);

	return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
				       xdp_prog);
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);

/*
 * xdp_do_redirect_frame - like xdp_do_redirect(), but for callers that
 * already hold a pre-computed xdp_frame for the packet.
 *
 * Per the commit message, this lets bpf_prog_run() avoid re-writing the
 * xdp_frame when the XDP program did not modify the frame boundaries:
 * the caller supplies @xdpf directly instead of having this function
 * derive it from @xdp.
 *
 * The XSKMAP path still needs the xdp_buff, so @xdp is passed alongside
 * @xdpf and used only for that branch; all other map types consume @xdpf
 * via __xdp_do_redirect_frame().
 *
 * Returns 0 on success or a negative errno on failure (propagated from
 * the helper actually invoked).
 */
int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
			  struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	enum bpf_map_type map_type = ri->map_type;

	/* AF_XDP sockets consume the xdp_buff, not the frame. */
	if (map_type == BPF_MAP_TYPE_XSKMAP)
		return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);

	return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog);
}
EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);

static int xdp_do_generic_redirect_map(struct net_device *dev,
				       struct sk_buff *skb,
				       struct xdp_buff *xdp,