Commit b9989b59 authored by Alexei Starovoitov's avatar Alexei Starovoitov
Browse files

Merge branch 'Typeless/weak ksym for gen_loader + misc fixups'

Kumar Kartikeya says:

====================

Patches (1,2,3,6) add typeless and weak ksym support to gen_loader. This is a
follow-up to the recent series adding kfuncs from modules.

The later patches (7,8) are misc fixes for selftests, and patch 4 for libbpf
where we try to be careful to not end up with fds == 0, as libbpf assumes in
various places that they are greater than 0. Patch 5 fixes up missing O_CLOEXEC
in libbpf.

Changelog:
----------
v4 -> v5
v4: https://lore.kernel.org/bpf/20211020191526.2306852-1-memxor@gmail.com

 * Address feedback from Andrii
   * Drop use of ensure_good_fd in unneeded call sites
   * Add sys_bpf_fd
   * Add _lskel suffix to all light skeletons and change all current selftests
   * Drop early break in close loop for sk_lookup
   * Fix other nits

v3 -> v4
v3: https://lore.kernel.org/bpf/20211014205644.1837280-1-memxor@gmail.com

 * Remove gpl_only = true from bpf_kallsyms_lookup_name (Alexei)
 * Add bpf_dump_raw_ok check to ensure kptr_restrict isn't bypassed (Alexei)

v2 -> v3
v2: https://lore.kernel.org/bpf/20211013073348.1611155-1-memxor@gmail.com

 * Address feedback from Song
   * Move ksym logging to separate helper to avoid code duplication
   * Move src_reg mask stuff to separate helper
   * Fix various other nits, add acks
     * __builtin_expect is used instead of likely, because skel_internal.h is
       included in isolation.

v1 -> v2
v1: https://lore.kernel.org/bpf/20211006002853.308945-1-memxor@gmail.com



 * Remove redundant OOM checks in emit_bpf_kallsyms_lookup_name
 * Use designated initializer for sk_lookup fd array (Jakub)
 * Do fd check for all fd returning low level APIs (Andrii, Alexei)
 * Make Fixes: tag quote commit message, use selftests/bpf prefix (Song, Andrii)
 * Split typeless and weak ksym support into separate patches, expand commit
   message (Song)
 * Fix duplication in selftests stemming from use of LSKELS_EXTRA (Song)
====================

Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents 2895f48f efadf2ad
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -2110,6 +2110,7 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
  enum bpf_func_id func_id, const struct bpf_prog *prog);
+16 −0
Original line number Diff line number Diff line
@@ -4923,6 +4923,21 @@ union bpf_attr {
 *		Dynamically cast a *sk* pointer to a *unix_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
 *	Description
 *		Get the address of a kernel symbol, returned in *res*. *res* is
 *		set to 0 if the symbol is not found.
 *	Return
 *		On success, zero. On error, a negative value.
 *
 *		**-EINVAL** if *flags* is not zero.
 *
 *		**-EINVAL** if string *name* is not the same size as *name_sz*.
 *
 *		**-ENOENT** if symbol is not found.
 *
 *		**-EPERM** if caller does not have permission to obtain kernel address.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
@@ -5104,6 +5119,7 @@ union bpf_attr {
	FN(get_branch_snapshot),	\
	FN(trace_vprintk),		\
	FN(skc_to_unix_sock),		\
	FN(kallsyms_lookup_name),	\
	/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
+27 −0
Original line number Diff line number Diff line
@@ -4781,6 +4781,31 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
	.arg1_type	= ARG_ANYTHING,
};

/* bpf_kallsyms_lookup_name() - BPF helper resolving a kernel symbol address.
 * name/name_sz: caller-supplied symbol string and its size, which must cover
 * the NUL terminator. flags: reserved, must be 0. res: out-pointer receiving
 * the address (0 when the symbol is not found).
 * Returns 0 on success, or a negative errno-style value.
 */
BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	/* No flags are defined yet; reject anything non-zero. */
	if (flags)
		return -EINVAL;

	/* Require a non-empty string whose last byte is the NUL terminator,
	 * so kallsyms_lookup_name() never reads past the verified buffer.
	 */
	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	/* Honour kptr_restrict: don't hand out kernel addresses to callers
	 * that aren't allowed to see them (see bpf_dump_raw_ok()).
	 */
	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

/* Verifier-facing prototype for bpf_kallsyms_lookup_name:
 * arg1/arg2 form a (pointer, const size) pair for the name buffer,
 * arg3 is the flags word (any scalar; validated at runtime),
 * arg4 points to the u64 result slot.
 * Deliberately not gpl_only — permission is enforced at runtime via
 * the bpf_dump_raw_ok() check instead.
 */
const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func		= bpf_kallsyms_lookup_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -4791,6 +4816,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
+16 −0
Original line number Diff line number Diff line
@@ -4923,6 +4923,21 @@ union bpf_attr {
 *		Dynamically cast a *sk* pointer to a *unix_sock* pointer.
 *	Return
 *		*sk* if casting is valid, or **NULL** otherwise.
 *
 * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
 *	Description
 *		Get the address of a kernel symbol, returned in *res*. *res* is
 *		set to 0 if the symbol is not found.
 *	Return
 *		On success, zero. On error, a negative value.
 *
 *		**-EINVAL** if *flags* is not zero.
 *
 *		**-EINVAL** if string *name* is not the same size as *name_sz*.
 *
 *		**-ENOENT** if symbol is not found.
 *
 *		**-EPERM** if caller does not have permission to obtain kernel address.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
@@ -5104,6 +5119,7 @@ union bpf_attr {
	FN(get_branch_snapshot),	\
	FN(trace_vprintk),		\
	FN(skc_to_unix_sock),		\
	FN(kallsyms_lookup_name),	\
	/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
+22 −13
Original line number Diff line number Diff line
@@ -65,13 +65,22 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
	return syscall(__NR_bpf, cmd, attr, size);
}

/* sys_bpf_fd() - issue a bpf(2) command that is expected to return a new fd.
 * On success the fd is passed through ensure_good_fd() so callers never see
 * an fd in the 0..2 range, since libbpf assumes valid fds are > 0 in
 * various places. Errors from sys_bpf() propagate unchanged.
 */
static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	return ensure_good_fd(sys_bpf(cmd, attr, size));
}

/* sys_bpf_prog_load() - load a BPF program, retrying on transient EAGAIN.
 * Attempts the BPF_PROG_LOAD command up to five additional times when the
 * kernel reports EAGAIN; any other failure (or success) ends the loop
 * immediately. Returns the program fd, or a negative value with errno set.
 */
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
{
	int attempts_left = 5;
	int fd;

	for (;;) {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
		/* Keep looping only for EAGAIN while retries remain. */
		if (fd >= 0 || errno != EAGAIN || attempts_left-- <= 0)
			break;
	}

	return fd;
}
@@ -104,7 +113,7 @@ int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr
		attr.inner_map_fd = create_attr->inner_map_fd;
	attr.map_extra = create_attr->map_extra;

	fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -206,7 +215,7 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
		attr.numa_node = node;
	}

	fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -634,7 +643,7 @@ int bpf_obj_get(const char *pathname)
	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -745,7 +754,7 @@ int bpf_link_create(int prog_fd, int target_fd,
		break;
	}
proceed:
	fd = sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -788,7 +797,7 @@ int bpf_iter_create(int link_fd)
	memset(&attr, 0, sizeof(attr));
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf(BPF_ITER_CREATE, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -946,7 +955,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -958,7 +967,7 @@ int bpf_map_get_fd_by_id(__u32 id)
	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	fd = sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -970,7 +979,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	fd = sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -982,7 +991,7 @@ int bpf_link_get_fd_by_id(__u32 id)
	memset(&attr, 0, sizeof(attr));
	attr.link_id = id;

	fd = sys_bpf(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -1013,7 +1022,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	fd = sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

@@ -1033,7 +1042,7 @@ int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_s
		attr.btf_log_buf = ptr_to_u64(log_buf);
	}

	fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));

	if (fd < 0 && !do_log && log_buf && log_buf_size) {
		do_log = true;
@@ -1075,7 +1084,7 @@ int bpf_enable_stats(enum bpf_stats_type type)
	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = type;

	fd = sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}

Loading