Commit ed69e066 authored by Jakub Kicinski
Browse files
Andrii Nakryiko says:

====================
pull-request: bpf-next 2023-03-08

We've added 23 non-merge commits during the last 2 day(s) which contain
a total of 28 files changed, 414 insertions(+), 104 deletions(-).

The main changes are:

1) Add more precise memory usage reporting for all BPF map types,
   from Yafang Shao.

2) Add ARM32 USDT support to libbpf, from Puranjay Mohan.

3) Fix BTF_ID_LIST size causing problems in !CONFIG_DEBUG_INFO_BTF,
   from Nathan Chancellor.

4) IMA selftests fix, from Roberto Sassu.

5) libbpf fix in APK support code, from Daniel Müller.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (23 commits)
  selftests/bpf: Fix IMA test
  libbpf: USDT arm arg parsing support
  libbpf: Refactor parse_usdt_arg() to re-use code
  libbpf: Fix theoretical u32 underflow in find_cd() function
  bpf: enforce all maps having memory usage callback
  bpf: offload map memory usage
  bpf, net: xskmap memory usage
  bpf, net: sock_map memory usage
  bpf, net: bpf_local_storage memory usage
  bpf: local_storage memory usage
  bpf: bpf_struct_ops memory usage
  bpf: queue_stack_maps memory usage
  bpf: devmap memory usage
  bpf: cpumap memory usage
  bpf: bloom_filter memory usage
  bpf: ringbuf memory usage
  bpf: reuseport_array memory usage
  bpf: stackmap memory usage
  bpf: arraymap memory usage
  bpf: hashtab memory usage
  ...
====================

Link: https://lore.kernel.org/r/20230308193533.1671597-1-andrii@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 10369080 12fabae0
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -161,6 +161,8 @@ struct bpf_map_ops {
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	u64 (*map_mem_usage)(const struct bpf_map *map);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

@@ -2622,6 +2624,7 @@ static inline bool bpf_map_is_offloaded(struct bpf_map *map)

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
@@ -2693,6 +2696,11 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

/* Stub that reports zero bytes of memory usage for offloaded maps.
 * NOTE(review): appears to live in the fallback (#else) branch of the
 * offload-support conditional, where offloaded maps cannot exist — confirm
 * against the surrounding #ifdef in the full header.
 */
static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
	return 0;
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
+1 −0
Original line number Diff line number Diff line
@@ -164,5 +164,6 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags);

void bpf_local_storage_free_rcu(struct rcu_head *rcu);
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);

#endif /* _BPF_LOCAL_STORAGE_H */
+1 −1
Original line number Diff line number Diff line
@@ -204,7 +204,7 @@ extern struct btf_id_set8 name;

#else

#define BTF_ID_LIST(name) static u32 __maybe_unused name[16];
#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
#define BTF_ID(prefix, name)
#define BTF_ID_FLAGS(prefix, name, ...)
#define BTF_ID_UNUSED
+1 −0
Original line number Diff line number Diff line
@@ -38,6 +38,7 @@ struct xdp_umem {
/* BPF map wrapping an array of XDP sockets (flexible array member sized by
 * map.max_entries elsewhere). Embeds struct bpf_map first so the generic map
 * code can container_of() back to it.
 */
struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	/* presumably counts populated slots, added for the new memory-usage
	 * reporting in this commit — confirm against xsk_map update/delete paths */
	atomic_t count;
	struct xdp_sock __rcu *xsk_map[];
};

+28 −0
Original line number Diff line number Diff line
@@ -721,6 +721,28 @@ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_
	return num_elems;
}

/* Report the approximate memory footprint, in bytes, of an array map:
 * the struct bpf_array header plus the element storage. Per-CPU arrays
 * additionally account one pointer per entry and one element copy per
 * possible CPU; mmap-able arrays are accounted page-aligned because
 * their storage is page-granular.
 */
static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 nr_entries = map->max_entries;
	u64 total = sizeof(*array);

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		/* pointer array plus a copy of every element on each possible CPU */
		total += nr_entries * sizeof(void *);
		total += nr_entries * array->elem_size * num_possible_cpus();
	} else if (map->map_flags & BPF_F_MMAPABLE) {
		/* header and data regions are each rounded up to page size */
		total = PAGE_ALIGN(total);
		total += PAGE_ALIGN(nr_entries * array->elem_size);
	} else {
		total += nr_entries * array->elem_size;
	}

	return total;
}

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
@@ -742,6 +764,7 @@ const struct bpf_map_ops array_map_ops = {
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};
@@ -762,6 +785,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};
@@ -1156,6 +1180,7 @@ const struct bpf_map_ops prog_array_map_ops = {
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

@@ -1257,6 +1282,7 @@ const struct bpf_map_ops perf_event_array_map_ops = {
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

@@ -1291,6 +1317,7 @@ const struct bpf_map_ops cgroup_array_map_ops = {
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif
@@ -1379,5 +1406,6 @@ const struct bpf_map_ops array_of_maps_map_ops = {
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};
Loading