Commit 15157d2e authored by Andrii Nakryiko's avatar Andrii Nakryiko
Browse files

Merge branch 'libbpf: Resolve unambiguous forward declarations'

Eduard Zingerman says:

====================

The patch-set consists of the following parts:
- An update for libbpf's hashmap interface from void* -> void* to a
  polymorphic one, allowing to use both long and void* keys and values
  w/o additional casts. Interface functions are hidden behind
  auxiliary macros that add casts as necessary.

  This simplifies many use cases in libbpf as hashmaps there are
  mostly integer to integer and required awkward looking incantations
  like `(void *)(long)off` previously. Also includes updates for perf
  as it copies map implementation from libbpf.

- A change to `lib/bpf/btf.c:btf__dedup` that adds a new pass named
  "Resolve unambiguous forward declaration". This pass builds a
  hashmap `name_off -> uniquely named struct or union` and uses it to
  replace FWD types by structs or unions. This is necessary for corner
  cases when FWD is not used as a part of some struct or union
  definition de-duplicated by `btf_dedup_struct_types`.

The goal of the patch-set is to resolve forward declarations that
don't take part in type graphs comparisons if declaration name is
unambiguous.

Example:

CU #1:

struct foo;              // standalone forward declaration
struct foo *some_global;

CU #2:

struct foo { int x; };
struct foo *another_global;

Currently the de-duplicated BTF for this example looks as follows:

[1] STRUCT 'foo' size=4 vlen=1 ...
[2] INT 'int' size=4 ...
[3] PTR '(anon)' type_id=1
[4] FWD 'foo' fwd_kind=struct
[5] PTR '(anon)' type_id=4

The goal of this patch-set is to simplify it as follows:

[1] STRUCT 'foo' size=4 vlen=1
	'x' type_id=2 bits_offset=0
[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
[3] PTR '(anon)' type_id=1

For defconfig kernel with BTF enabled this removes 63 forward
declarations.

For allmodconfig kernel with BTF enabled this removes ~5K out of ~21K
forward declarations in ko objects. This unlocks some additional
de-duplication in ko objects, but impact is tiny: ~13K less BTF ids
out of ~2M.

Changelog:
 v3 -> v4
 Changes suggested by Andrii:
 - hashmap interface rework to allow use of integer and pointer keys
   and values w/o casts as suggested in [1].
 v2 -> v3
 Changes suggested by Andrii:
 - perf's util/hashtable.{c,h} are synchronized with libbpf
   implementation, perf's source code updated accordingly;
 - changes to libbpf, bpf selftests and perf are combined in a single
   patch to simplify bisecting;
 - hashtable interface updated to be long -> long instead of
   uintptr_t -> uintptr_t;
 - btf_dedup_resolve_fwds updated to correctly use IS_ERR / PTR_ERR
   macro;
 - test cases for btf_dedup_resolve_fwds are updated for better
   clarity.
 v1 -> v2
 - Style fixes in btf_dedup_resolve_fwd and btf_dedup_resolve_fwds as
   suggested by Alan.

[v1] https://lore.kernel.org/bpf/20221102110905.2433622-1-eddyz87@gmail.com/
[v2] https://lore.kernel.org/bpf/20221103033430.2611623-1-eddyz87@gmail.com/
[v3] https://lore.kernel.org/bpf/20221106202910.4193104-1-eddyz87@gmail.com/

[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/


====================

Signed-off-by: default avatarAndrii Nakryiko <andrii@kernel.org>
parents e5659e4e 99e18fad
Loading
Loading
Loading
Loading
+9 −16
Original line number Diff line number Diff line
@@ -815,8 +815,7 @@ build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
		if (!btf_id)
			continue;

		err = hashmap__append(tab, u32_as_hash_field(btf_id),
				      u32_as_hash_field(id));
		err = hashmap__append(tab, btf_id, id);
		if (err) {
			p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s",
			      btf_id, id, strerror(-err));
@@ -875,17 +874,13 @@ show_btf_plain(struct bpf_btf_info *info, int fd,
	printf("size %uB", info->btf_size);

	n = 0;
	hashmap__for_each_key_entry(btf_prog_table, entry,
				    u32_as_hash_field(info->id)) {
		printf("%s%u", n++ == 0 ? "  prog_ids " : ",",
		       hash_field_as_u32(entry->value));
	hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
		printf("%s%lu", n++ == 0 ? "  prog_ids " : ",", entry->value);
	}

	n = 0;
	hashmap__for_each_key_entry(btf_map_table, entry,
				    u32_as_hash_field(info->id)) {
		printf("%s%u", n++ == 0 ? "  map_ids " : ",",
		       hash_field_as_u32(entry->value));
	hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
		printf("%s%lu", n++ == 0 ? "  map_ids " : ",", entry->value);
	}

	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
@@ -907,17 +902,15 @@ show_btf_json(struct bpf_btf_info *info, int fd,

	jsonw_name(json_wtr, "prog_ids");
	jsonw_start_array(json_wtr);	/* prog_ids */
	hashmap__for_each_key_entry(btf_prog_table, entry,
				    u32_as_hash_field(info->id)) {
		jsonw_uint(json_wtr, hash_field_as_u32(entry->value));
	hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
		jsonw_uint(json_wtr, entry->value);
	}
	jsonw_end_array(json_wtr);	/* prog_ids */

	jsonw_name(json_wtr, "map_ids");
	jsonw_start_array(json_wtr);	/* map_ids */
	hashmap__for_each_key_entry(btf_map_table, entry,
				    u32_as_hash_field(info->id)) {
		jsonw_uint(json_wtr, hash_field_as_u32(entry->value));
	hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
		jsonw_uint(json_wtr, entry->value);
	}
	jsonw_end_array(json_wtr);	/* map_ids */

+5 −5
Original line number Diff line number Diff line
@@ -494,7 +494,7 @@ static int do_build_table_cb(const char *fpath, const struct stat *sb,
		goto out_close;
	}

	err = hashmap__append(build_fn_table, u32_as_hash_field(pinned_info.id), path);
	err = hashmap__append(build_fn_table, pinned_info.id, path);
	if (err) {
		p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
		      pinned_info.id, path, strerror(errno));
@@ -545,7 +545,7 @@ void delete_pinned_obj_table(struct hashmap *map)
		return;

	hashmap__for_each_entry(map, entry, bkt)
		free(entry->value);
		free(entry->pvalue);

	hashmap__free(map);
}
@@ -1041,12 +1041,12 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
	return fd;
}

size_t hash_fn_for_key_as_id(const void *key, void *ctx)
size_t hash_fn_for_key_as_id(long key, void *ctx)
{
	return (size_t)key;
	return key;
}

bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx)
bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
{
	return k1 == k2;
}
+7 −12
Original line number Diff line number Diff line
@@ -1660,21 +1660,16 @@ struct btfgen_info {
	struct btf *marked_btf; /* btf structure used to mark used types */
};

static size_t btfgen_hash_fn(const void *key, void *ctx)
static size_t btfgen_hash_fn(long key, void *ctx)
{
	return (size_t)key;
	return key;
}

static bool btfgen_equal_fn(const void *k1, const void *k2, void *ctx)
static bool btfgen_equal_fn(long k1, long k2, void *ctx)
{
	return k1 == k2;
}

static void *u32_as_hash_key(__u32 x)
{
	return (void *)(uintptr_t)x;
}

static void btfgen_free_info(struct btfgen_info *info)
{
	if (!info)
@@ -2086,18 +2081,18 @@ static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const void *type_key = u32_as_hash_key(relo->type_id);
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, type_key, (void **)&cands)) {
			    !hashmap__find(cand_cache, relo->type_id, &cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
				err = hashmap__set(cand_cache, relo->type_id, cands,
						   NULL, NULL);
				if (err)
					goto out;
			}
@@ -2120,7 +2115,7 @@ static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->value);
			bpf_core_free_cands(entry->pvalue);
		}
		hashmap__free(cand_cache);
	}
+4 −6
Original line number Diff line number Diff line
@@ -204,9 +204,8 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hashmap__for_each_key_entry(link_table, entry,
					    u32_as_hash_field(info->id))
			jsonw_string(json_wtr, entry->value);
		hashmap__for_each_key_entry(link_table, entry, info->id)
			jsonw_string(json_wtr, entry->pvalue);
		jsonw_end_array(json_wtr);
	}

@@ -309,9 +308,8 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		hashmap__for_each_key_entry(link_table, entry,
					    u32_as_hash_field(info->id))
			printf("\n\tpinned %s", (char *)entry->value);
		hashmap__for_each_key_entry(link_table, entry, info->id)
			printf("\n\tpinned %s", (char *)entry->pvalue);
	}
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

+2 −12
Original line number Diff line number Diff line
@@ -240,8 +240,8 @@ int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
int print_all_levels(__maybe_unused enum libbpf_print_level level,
		     const char *format, va_list args);

size_t hash_fn_for_key_as_id(const void *key, void *ctx);
bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx);
size_t hash_fn_for_key_as_id(long key, void *ctx);
bool equal_fn_for_key_as_id(long k1, long k2, void *ctx);

/* bpf_attach_type_input_str - convert the provided attach type value into a
 * textual representation that we accept for input purposes.
@@ -257,16 +257,6 @@ bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx);
 */
const char *bpf_attach_type_input_str(enum bpf_attach_type t);

static inline void *u32_as_hash_field(__u32 x)
{
	return (void *)(uintptr_t)x;
}

static inline __u32 hash_field_as_u32(const void *x)
{
	return (__u32)(uintptr_t)x;
}

static inline bool hashmap__empty(struct hashmap *map)
{
	return map ? hashmap__size(map) == 0 : true;
Loading