Commit 86c7ecad authored by Andrii Nakryiko
Browse files

Merge branch 'libbpf 1.0: deprecate bpf_map__def() API'

Christy Lee says:

====================

bpf_map__def() is rarely used and non-extensible. bpf_map_def fields
can be accessed with appropriate map getters and setters instead.
Deprecate bpf_map__def() API and replace use cases with getters and
setters.

Changelog:
----------
v1 -> v2:
https://lore.kernel.org/all/20220105230057.853163-1-christylee@fb.com/



* Fixed commit messages to match commit titles
* Fixed indentation
* Removed bpf_map__def() usage that was missed in v1
====================

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 0991f6a3 063fa26a
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -209,7 +209,7 @@ static struct datarec *alloc_record_per_cpu(void)

static struct record *alloc_record_per_rxq(void)
{
	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
	struct record *array;

	array = calloc(nr_rxqs, sizeof(struct record));
@@ -222,7 +222,7 @@ static struct record *alloc_record_per_rxq(void)

static struct stats_record *alloc_stats_record(void)
{
	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
	struct stats_record *rec;
	int i;

@@ -241,7 +241,7 @@ static struct stats_record *alloc_stats_record(void)

static void free_stats_record(struct stats_record *r)
{
	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
	int i;

	for (i = 0; i < nr_rxqs; i++)
@@ -289,7 +289,7 @@ static void stats_collect(struct stats_record *rec)
	map_collect_percpu(fd, 0, &rec->stats);

	fd = bpf_map__fd(rx_queue_index_map);
	max_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
	max_rxqs = bpf_map__max_entries(rx_queue_index_map);
	for (i = 0; i < max_rxqs; i++)
		map_collect_percpu(fd, i, &rec->rxq[i]);
}
@@ -335,7 +335,7 @@ static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			int action, __u32 cfg_opt)
{
	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
	unsigned int nr_cpus = bpf_num_possible_cpus();
	double pps = 0, err = 0;
	struct record *rec, *prev;
+6 −6
Original line number Diff line number Diff line
@@ -227,7 +227,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
		/* only generate definitions for memory-mapped internal maps */
		if (!bpf_map__is_internal(map))
			continue;
		if (!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
		if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (!get_map_ident(map, map_ident, sizeof(map_ident)))
@@ -468,7 +468,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tmunmap(skel->%1$s, %2$zd);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
@@ -536,7 +536,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		codegen("\
@@ -600,10 +600,10 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
			continue;

		if (!bpf_map__is_internal(map) ||
		    !(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
		    !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			continue;

		if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG)
		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";
@@ -961,7 +961,7 @@ static int do_skeleton(int argc, char **argv)
				i, bpf_map__name(map), i, ident);
			/* memory-mapped internal maps */
			if (bpf_map__is_internal(map) &&
			    (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) {
			    (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
				printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
				       i, ident);
			}
+1 −3
Original line number Diff line number Diff line
@@ -480,7 +480,6 @@ static int do_unregister(int argc, char **argv)
static int do_register(int argc, char **argv)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts);
	const struct bpf_map_def *def;
	struct bpf_map_info info = {};
	__u32 info_len = sizeof(info);
	int nr_errs = 0, nr_maps = 0;
@@ -510,8 +509,7 @@ static int do_register(int argc, char **argv)
	}

	bpf_object__for_each_map(map, obj) {
		def = bpf_map__def(map);
		if (def->type != BPF_MAP_TYPE_STRUCT_OPS)
		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
			continue;

		link = bpf_map__attach_struct_ops(map);
+2 −1
Original line number Diff line number Diff line
@@ -706,7 +706,8 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map definition */
LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use appropriate getters or setters instead")
const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
/* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
/* get/set map type */
+27 −37
Original line number Diff line number Diff line
@@ -1002,24 +1002,22 @@ __bpf_map__config_value(struct bpf_map *map,
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
	if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
	if (bpf_map__key_size(map) < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	switch (bpf_map__value_size(map)) {
	case 1:
	case 2:
	case 4:
@@ -1061,7 +1059,6 @@ __bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct evlist *evlist)
{
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
@@ -1072,18 +1069,16 @@ __bpf_map__config_event(struct bpf_map *map,
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		return PTR_ERR(map);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
	if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
@@ -1132,7 +1127,6 @@ config_map_indices_range_check(struct parse_events_term *term,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
@@ -1143,10 +1137,8 @@ config_map_indices_range_check(struct parse_events_term *term,
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
	if (!map) {
		pr_debug("Map '%s' is invalid\n", map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

@@ -1155,7 +1147,7 @@ config_map_indices_range_check(struct parse_events_term *term,
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
		if (idx >= bpf_map__max_entries(map)) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
@@ -1248,21 +1240,21 @@ int bpf__config_obj(struct bpf_object *obj,
}

typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 const struct bpf_map *map,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      int map_fd, const struct bpf_map *map,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
	for (i = 0; i < bpf_map__max_entries(map); i++) {
		err = func(name, map_fd, map, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
@@ -1275,7 +1267,7 @@ foreach_key_array_all(map_config_func_t func,
static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 const struct bpf_map *map,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
@@ -1288,7 +1280,7 @@ foreach_key_array_ranges(map_config_func_t func, void *arg,
		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			err = func(name, map_fd, map, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
@@ -1304,9 +1296,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	int err, map_fd, type;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

@@ -1319,9 +1310,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
	if (!map) {
		pr_debug("Map '%s' is invalid\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
@@ -1330,19 +1320,19 @@ bpf_map_config_foreach_key(struct bpf_map *map,
		return map_fd;
	}

	type = bpf_map__type(map);
	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		switch (type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
							    map_fd, map, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
							       map_fd, map, op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
@@ -1451,7 +1441,7 @@ apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     const struct bpf_map *map,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
@@ -1460,7 +1450,7 @@ apply_obj_config_map_for_key(const char *name, int map_fd,
	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 bpf_map__value_size(map),
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
Loading