Commit 32f91529 authored by Daniel Borkmann

Merge branch 'bpf-xdp-redirect'



Björn Töpel says:

====================
This two-patch series contains two optimizations for the
bpf_redirect_map() helper and the xdp_do_redirect() function.

The bpf_redirect_map() optimization is about avoiding the map lookup
dispatching. Instead of having a switch-statement that selects the
correct lookup function, we let bpf_redirect_map() be a map operation,
where each map type has its own implementation. This way the run-time
dispatch on the map type is avoided (see the sketch below).
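
Condensed sketch of the before/after shape (illustrative, paraphrased
from the series rather than the verbatim kernel sources; the pre-series
helper name and switch body are reconstructed for contrast):

/* Before: a per-packet switch on the map type selects the lookup
 * function inside the bpf_redirect_map() helper.
 */
static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP:
		return __dev_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_DEVMAP_HASH:
		return __dev_map_hash_lookup_elem(map, index);
	case BPF_MAP_TYPE_CPUMAP:
		return __cpu_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_XSKMAP:
		return __xsk_map_lookup_elem(map, index);
	default:
		return NULL;
	}
}

/* After: each map type wires up ->map_redirect in its bpf_map_ops
 * (see the cpumap patch below), so the helper body collapses to a
 * single indirect call.
 */
BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
	   u64, flags)
{
	return map->ops->map_redirect(map, ifindex, flags);
}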

The xdp_do_redirect() patch restructures the code so that the map
pointer indirection can be avoided (sketched below).
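
Roughly, the hot path goes from dereferencing (and NULL-checking) a
struct bpf_map * in the per-CPU bpf_redirect_info to branching on a
plain enum value. A condensed sketch of the restructured function,
with tracing and most error handling omitted (not the verbatim code):

int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		    struct bpf_prog *xdp_prog)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	enum bpf_map_type map_type = ri->map_type;
	void *fwd = ri->tgt_value;
	int err;

	/* Clear the state up front so a stale target is never reused. */
	ri->map_type = BPF_MAP_TYPE_UNSPEC;

	switch (map_type) {
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
		err = dev_map_enqueue(fwd, xdp, dev);
		break;
	case BPF_MAP_TYPE_CPUMAP:
		err = cpu_map_enqueue(fwd, xdp, dev);
		break;
	case BPF_MAP_TYPE_XSKMAP:
		err = __xsk_map_redirect(fwd, xdp);
		break;
	default:
		err = -EBADRQC;
		break;
	}
	return err;
}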

Performance-wise, I measured a 4% improvement for XSKMAP
(sample: xdpsock/rx-drop) and an 8% improvement with the
xdp_redirect_map sample on my machine.

v5->v6:  Removed the REDIR enum; use map_id and map_type instead. (Daniel)
         Applied Daniel's fixups on patch 1. (Daniel)
v4->v5:  Renamed map operation to map_redirect. (Daniel)
v3->v4:  Made bpf_redirect_map() a map operation. (Daniel)
v2->v3:  Fix build when CONFIG_NET is not set. (lkp)
v1->v2:  Removed warning when CONFIG_BPF_SYSCALL was not set. (lkp)
         Cleaned up case-clause in xdp_do_generic_redirect_map(). (Toke)
         Re-added comment. (Toke)
rfc->v1: Use map_id, and remove bpf_clear_redirect_map(). (Toke)
         Get rid of the macro and use __always_inline. (Jesper)
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 11d39cfe ee75aef2
include/linux/bpf.h +6 −20
@@ -118,6 +118,9 @@ struct bpf_map_ops {
 					   void *owner, u32 size);
 	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
 
+	/* Misc helpers.*/
+	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
+
 	/* map_meta_equal must be implemented for maps that can be
 	 * used as an inner map.  It is a runtime check to ensure
 	 * an inner map can be inserted to an outer map.
@@ -1450,9 +1453,9 @@ struct btf *bpf_get_btf_vmlinux(void);
 /* Map specifics */
 struct xdp_buff;
 struct sk_buff;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
 
-struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
-struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_flush(void);
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
@@ -1462,7 +1465,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 			     struct bpf_prog *xdp_prog);
 bool dev_map_can_have_prog(struct bpf_map *map);
 
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_flush(void);
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
@@ -1593,17 +1595,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
 	return -EOPNOTSUPP;
 }
 
-static inline struct net_device  *__dev_map_lookup_elem(struct bpf_map *map,
-						       u32 key)
-{
-	return NULL;
-}
-
-static inline struct net_device  *__dev_map_hash_lookup_elem(struct bpf_map *map,
-							     u32 key)
-{
-	return NULL;
-}
 static inline bool dev_map_can_have_prog(struct bpf_map *map)
 {
 	return false;
@@ -1615,6 +1606,7 @@ static inline void __dev_flush(void)
 
 struct xdp_buff;
 struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
 
 static inline
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
@@ -1639,12 +1631,6 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
 	return 0;
 }
 
-static inline
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
-{
-	return NULL;
-}
-
 static inline void __cpu_map_flush(void)
 {
 }
include/linux/filter.h +30 −1
@@ -646,7 +646,8 @@ struct bpf_redirect_info {
 	u32 flags;
 	u32 tgt_index;
 	void *tgt_value;
-	struct bpf_map *map;
+	u32 map_id;
+	enum bpf_map_type map_type;
 	u32 kern_flags;
 	struct bpf_nh_params nh;
 };
@@ -1472,4 +1473,32 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
 }
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 
+static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags,
+						  void *lookup_elem(struct bpf_map *map, u32 key))
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+	/* Lower bits of the flags are used as return code on lookup failure */
+	if (unlikely(flags > XDP_TX))
+		return XDP_ABORTED;
+
+	ri->tgt_value = lookup_elem(map, ifindex);
+	if (unlikely(!ri->tgt_value)) {
+		/* If the lookup fails we want to clear out the state in the
+		 * redirect_info struct completely, so that if an eBPF program
+		 * performs multiple lookups, the last one always takes
+		 * precedence.
+		 */
+		ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
+		ri->map_type = BPF_MAP_TYPE_UNSPEC;
+		return flags;
+	}
+
+	ri->tgt_index = ifindex;
+	ri->map_id = map->id;
+	ri->map_type = map->map_type;
+
+	return XDP_REDIRECT;
+}
+
 #endif /* __LINUX_FILTER_H__ */
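
For reference, the helper's contract seen from a BPF program is
unchanged by this series; a minimal, self-contained XDP program using
it (the map name and size here are illustrative, not from the series):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} xsks_map SEC(".maps");

SEC("xdp")
int xdp_redir_prog(struct xdp_md *ctx)
{
	/* The lower bits of the flags argument (XDP_PASS here) are
	 * returned when the lookup fails, matching the behavior of
	 * __bpf_xdp_redirect_map() above.
	 */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}

char _license[] SEC("license") = "GPL";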
include/net/xdp_sock.h +0 −19
@@ -80,19 +80,6 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
@@ -109,12 +96,6 @@ static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */
include/trace/events/xdp.h +35 −27
@@ -86,19 +86,15 @@ struct _bpf_dtab_netdev {
 };
 #endif /* __DEVMAP_OBJ_TYPE */
 
-#define devmap_ifindex(tgt, map)				\
-	(((map->map_type == BPF_MAP_TYPE_DEVMAP ||	\
-		  map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
-	  ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
-
 DECLARE_EVENT_CLASS(xdp_redirect_template,
 
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
 
-	TP_ARGS(dev, xdp, tgt, err, map, index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
 
 	TP_STRUCT__entry(
 		__field(int, prog_id)
@@ -111,14 +107,22 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
 	),
 
 	TP_fast_assign(
+		u32 ifindex = 0, map_index = index;
+
+		if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+			ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
+		} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+			ifindex = index;
+			map_index = 0;
+		}
+
 		__entry->prog_id	= xdp->aux->id;
 		__entry->act		= XDP_REDIRECT;
 		__entry->ifindex	= dev->ifindex;
 		__entry->err		= err;
-		__entry->to_ifindex	= map ? devmap_ifindex(tgt, map) :
-						index;
-		__entry->map_id		= map ? map->id : 0;
-		__entry->map_index	= map ? index : 0;
+		__entry->to_ifindex	= ifindex;
+		__entry->map_id		= map_id;
+		__entry->map_index	= map_index;
 	),
 
 	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
@@ -133,45 +137,49 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
 
 #define _trace_xdp_redirect(dev, xdp, to)						\
-	 trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to)
+	 trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
 
 #define _trace_xdp_redirect_err(dev, xdp, to, err)					\
-	 trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to)
+	 trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
 
-#define _trace_xdp_redirect_map(dev, xdp, to, map, index)		\
-	 trace_xdp_redirect(dev, xdp, to, 0, map, index)
+#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
+	 trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)
 
-#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err)	\
-	 trace_xdp_redirect_err(dev, xdp, to, err, map, index)
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
+	 trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
 
 /* not used anymore, but kept around so as not to break old programs */
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 TRACE_EVENT(xdp_cpumap_kthread,
kernel/bpf/cpumap.c +7 −2
@@ -543,7 +543,6 @@ static void cpu_map_free(struct bpf_map *map)
 	 * complete.
 	 */
 
-	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* For cpu_map the remote CPUs can still be using the entries
@@ -563,7 +562,7 @@ static void cpu_map_free(struct bpf_map *map)
 	kfree(cmap);
 }
 
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
+static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	struct bpf_cpu_map_entry *rcpu;
@@ -600,6 +599,11 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
+static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+{
+	return __bpf_xdp_redirect_map(map, ifindex, flags, __cpu_map_lookup_elem);
+}
+
 static int cpu_map_btf_id;
 const struct bpf_map_ops cpu_map_ops = {
 	.map_meta_equal		= bpf_map_meta_equal,
@@ -612,6 +616,7 @@ const struct bpf_map_ops cpu_map_ops = {
 	.map_check_btf		= map_check_no_btf,
 	.map_btf_name		= "bpf_cpu_map",
 	.map_btf_id		= &cpu_map_btf_id,
+	.map_redirect		= cpu_map_redirect,
 };
 
 static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
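
The other map types are wired up the same way; for instance, the XSKMAP
side of the series adds an equivalent operation (shown here as a sketch
condensed from the series, not the full patch):

static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      __xsk_map_lookup_elem);
}

const struct bpf_map_ops xsk_map_ops = {
	/* ... existing ops ... */
	.map_redirect	= xsk_map_redirect,
};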