Commit 400031e0 authored by David Vernet, committed by Daniel Borkmann
Browse files

bpf: Add __bpf_kfunc tag to all kfuncs



Now that we have the __bpf_kfunc tag, we should add it to all
existing kfuncs to ensure that they'll never be elided in LTO builds.

Signed-off-by: David Vernet <void@manifault.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/20230201173016.342758-4-void@manifault.com
parent 98e6ab7a
Loading
Loading
Loading
Loading
+30 −30
Original line number Diff line number Diff line
@@ -48,7 +48,7 @@ __diag_ignore_all("-Wmissing-prototypes",
 * bpf_cpumask_create() allocates memory using the BPF memory allocator, and
 * will not block. It may return NULL if no memory is available.
 */
struct bpf_cpumask *bpf_cpumask_create(void)
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
{
	struct bpf_cpumask *cpumask;

@@ -74,7 +74,7 @@ struct bpf_cpumask *bpf_cpumask_create(void)
 * must either be embedded in a map as a kptr, or freed with
 * bpf_cpumask_release().
 */
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
{
	refcount_inc(&cpumask->usage);
	return cpumask;
@@ -90,7 +90,7 @@ struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
 * kptr, or freed with bpf_cpumask_release(). This function may return NULL if
 * no BPF cpumask was found in the specified map value.
 */
struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
{
	struct bpf_cpumask *cpumask;

@@ -116,7 +116,7 @@ struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
 * reference of the BPF cpumask has been released, it is subsequently freed in
 * an RCU callback in the BPF memory allocator.
 */
void bpf_cpumask_release(struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
{
	if (!cpumask)
		return;
@@ -135,7 +135,7 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask)
 * Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 */
u32 bpf_cpumask_first(const struct cpumask *cpumask)
__bpf_kfunc u32 bpf_cpumask_first(const struct cpumask *cpumask)
{
	return cpumask_first(cpumask);
}
@@ -148,7 +148,7 @@ u32 bpf_cpumask_first(const struct cpumask *cpumask)
 * Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
 * pointer may be safely passed to this function.
 */
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
__bpf_kfunc u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
{
	return cpumask_first_zero(cpumask);
}
@@ -158,7 +158,7 @@ u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
 * @cpu: The CPU to be set in the cpumask.
 * @cpumask: The BPF cpumask in which a bit is being set.
 */
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return;
@@ -171,7 +171,7 @@ void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 * @cpu: The CPU to be cleared from the cpumask.
 * @cpumask: The BPF cpumask in which a bit is being cleared.
 */
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return;
@@ -188,7 +188,7 @@ void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 * * true  - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
 */
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;
@@ -205,7 +205,7 @@ bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
 * * true  - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;
@@ -223,7 +223,7 @@ bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 * * true  - @cpu is set in the cpumask
 * * false - @cpu was not set in the cpumask, or @cpu is invalid.
 */
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
{
	if (!cpu_valid(cpu))
		return false;
@@ -235,7 +235,7 @@ bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask having all of its bits set.
 */
void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
{
	cpumask_setall((struct cpumask *)cpumask);
}
@@ -244,7 +244,7 @@ void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
 * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
 * @cpumask: The BPF cpumask being cleared.
 */
void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
__bpf_kfunc void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
{
	cpumask_clear((struct cpumask *)cpumask);
}
@@ -261,7 +261,7 @@ void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
bool bpf_cpumask_and(struct bpf_cpumask *dst,
__bpf_kfunc bool bpf_cpumask_and(struct bpf_cpumask *dst,
				 const struct cpumask *src1,
				 const struct cpumask *src2)
{
@@ -276,7 +276,7 @@ bool bpf_cpumask_and(struct bpf_cpumask *dst,
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
void bpf_cpumask_or(struct bpf_cpumask *dst,
__bpf_kfunc void bpf_cpumask_or(struct bpf_cpumask *dst,
				const struct cpumask *src1,
				const struct cpumask *src2)
{
@@ -291,7 +291,7 @@ void bpf_cpumask_or(struct bpf_cpumask *dst,
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
void bpf_cpumask_xor(struct bpf_cpumask *dst,
__bpf_kfunc void bpf_cpumask_xor(struct bpf_cpumask *dst,
				 const struct cpumask *src1,
				 const struct cpumask *src2)
{
@@ -309,7 +309,7 @@ void bpf_cpumask_xor(struct bpf_cpumask *dst,
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
__bpf_kfunc bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_equal(src1, src2);
}
@@ -325,7 +325,7 @@ bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
__bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_intersects(src1, src2);
}
@@ -341,7 +341,7 @@ bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *sr
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
__bpf_kfunc bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_subset(src1, src2);
}
@@ -356,7 +356,7 @@ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
bool bpf_cpumask_empty(const struct cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_empty(const struct cpumask *cpumask)
{
	return cpumask_empty(cpumask);
}
@@ -371,7 +371,7 @@ bool bpf_cpumask_empty(const struct cpumask *cpumask)
 *
 * A struct bpf_cpumask pointer may be safely passed to @cpumask.
 */
bool bpf_cpumask_full(const struct cpumask *cpumask)
__bpf_kfunc bool bpf_cpumask_full(const struct cpumask *cpumask)
{
	return cpumask_full(cpumask);
}
@@ -383,7 +383,7 @@ bool bpf_cpumask_full(const struct cpumask *cpumask)
 *
 * A struct bpf_cpumask pointer may be safely passed to @src.
 */
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
__bpf_kfunc void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
{
	cpumask_copy((struct cpumask *)dst, src);
}
@@ -398,7 +398,7 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
 *
 * A struct bpf_cpumask pointer may be safely passed to @src.
 */
u32 bpf_cpumask_any(const struct cpumask *cpumask)
__bpf_kfunc u32 bpf_cpumask_any(const struct cpumask *cpumask)
{
	return cpumask_any(cpumask);
}
@@ -415,7 +415,7 @@ u32 bpf_cpumask_any(const struct cpumask *cpumask)
 *
 * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
 */
u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
__bpf_kfunc u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
{
	return cpumask_any_and(src1, src2);
}
+19 −19
Original line number Diff line number Diff line
@@ -1776,7 +1776,7 @@ __diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");

void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
{
	struct btf_struct_meta *meta = meta__ign;
	u64 size = local_type_id__k;
@@ -1790,7 +1790,7 @@ void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
	return p;
}

void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
{
	struct btf_struct_meta *meta = meta__ign;
	void *p = p__alloc;
@@ -1811,12 +1811,12 @@ static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *hea
	tail ? list_add_tail(n, h) : list_add(n, h);
}

void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
__bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
{
	return __bpf_list_add(node, head, false);
}

void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
__bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
{
	return __bpf_list_add(node, head, true);
}
@@ -1834,12 +1834,12 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tai
	return (struct bpf_list_node *)n;
}

struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
{
	return __bpf_list_del(head, false);
}

struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
{
	return __bpf_list_del(head, true);
}
@@ -1850,7 +1850,7 @@ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
 * bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
struct task_struct *bpf_task_acquire(struct task_struct *p)
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
{
	return get_task_struct(p);
}
@@ -1861,7 +1861,7 @@ struct task_struct *bpf_task_acquire(struct task_struct *p)
 * released by calling bpf_task_release().
 * @p: The task on which a reference is being acquired.
 */
struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
__bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
{
	/* For the time being this function returns NULL, as it's not currently
	 * possible to safely acquire a reference to a task with RCU protection
@@ -1913,7 +1913,7 @@ struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
 * be released by calling bpf_task_release().
 * @pp: A pointer to a task kptr on which a reference is being acquired.
 */
struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
__bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
{
	/* We must return NULL here until we have clarity on how to properly
	 * leverage RCU for ensuring a task's lifetime. See the comment above
@@ -1926,7 +1926,7 @@ struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
 * bpf_task_release - Release the reference acquired on a task.
 * @p: The task on which a reference is being released.
 */
void bpf_task_release(struct task_struct *p)
__bpf_kfunc void bpf_task_release(struct task_struct *p)
{
	if (!p)
		return;
@@ -1941,7 +1941,7 @@ void bpf_task_release(struct task_struct *p)
 * calling bpf_cgroup_release().
 * @cgrp: The cgroup on which a reference is being acquired.
 */
struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	return cgrp;
@@ -1953,7 +1953,7 @@ struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
 * be released by calling bpf_cgroup_release().
 * @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
 */
struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
{
	struct cgroup *cgrp;

@@ -1985,7 +1985,7 @@ struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
 * drops to 0.
 * @cgrp: The cgroup on which a reference is being released.
 */
void bpf_cgroup_release(struct cgroup *cgrp)
__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
{
	if (!cgrp)
		return;
@@ -2000,7 +2000,7 @@ void bpf_cgroup_release(struct cgroup *cgrp)
 * @cgrp: The cgroup for which we're performing a lookup.
 * @level: The level of ancestor to look up.
 */
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
{
	struct cgroup *ancestor;

@@ -2019,7 +2019,7 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
 * stored in a map, or released with bpf_task_release().
 * @pid: The pid of the task being looked up.
 */
struct task_struct *bpf_task_from_pid(s32 pid)
__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
{
	struct task_struct *p;

@@ -2032,22 +2032,22 @@ struct task_struct *bpf_task_from_pid(s32 pid)
	return p;
}

void *bpf_cast_to_kern_ctx(void *obj)
__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
{
	return obj;
}

void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
{
	return obj__ign;
}

void bpf_rcu_read_lock(void)
__bpf_kfunc void bpf_rcu_read_lock(void)
{
	rcu_read_lock();
}

void bpf_rcu_read_unlock(void)
__bpf_kfunc void bpf_rcu_read_unlock(void)
{
	rcu_read_unlock();
}
+2 −2
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@ static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 * rstat_cpu->updated_children list.  See the comment on top of
 * cgroup_rstat_cpu definition for details.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
	unsigned long flags;
@@ -231,7 +231,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
 *
 * This function may block.
 */
void cgroup_rstat_flush(struct cgroup *cgrp)
__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
	might_sleep();

+2 −1
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
@@ -975,7 +976,7 @@ void __noclone __crash_kexec(struct pt_regs *regs)
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
__bpf_kfunc void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

+4 −4
Original line number Diff line number Diff line
@@ -1236,7 +1236,7 @@ __diag_ignore_all("-Wmissing-prototypes",
 * Return: a bpf_key pointer with a valid key pointer if the key is found, a
 *         NULL pointer otherwise.
 */
struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
__bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
{
	key_ref_t key_ref;
	struct bpf_key *bkey;
@@ -1285,7 +1285,7 @@ struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
 * Return: a bpf_key pointer with an invalid key pointer set from the
 *         pre-determined ID on success, a NULL pointer otherwise
 */
struct bpf_key *bpf_lookup_system_key(u64 id)
__bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
{
	struct bpf_key *bkey;

@@ -1309,7 +1309,7 @@ struct bpf_key *bpf_lookup_system_key(u64 id)
 * Decrement the reference count of the key inside *bkey*, if the pointer
 * is valid, and free *bkey*.
 */
void bpf_key_put(struct bpf_key *bkey)
__bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
{
	if (bkey->has_ref)
		key_put(bkey->key);
@@ -1329,7 +1329,7 @@ void bpf_key_put(struct bpf_key *bkey)
 *
 * Return: 0 on success, a negative value on error.
 */
int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
			       struct bpf_dynptr_kern *sig_ptr,
			       struct bpf_key *trusted_keyring)
{
Loading