Commit b671c206 authored by Kui-Feng Lee, committed by Martin KaFai Lau

bpf: Retire the struct_ops map kvalue->refcnt.



We have replaced kvalue->refcnt with synchronize_rcu() to wait for an
RCU grace period.

Maintaining kvalue->refcnt was a complicated task, as we had to keep
two reference counts in step: kvalue->refcnt itself and the reference
count of the bpf_map. When kvalue->refcnt reached zero, we also had to
drop the reference count on the bpf_map, yet these steps were not
performed atomically and required vigilance to manage correctly. By
eliminating kvalue->refcnt, maintenance becomes more straightforward,
as only the refcount of the bpf_map has to be managed.
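
For reference, the retired pattern paired the two counts roughly like
this (a simplified fragment based on the code this patch removes, not
a complete function):

	/* old scheme: kvalue->refcnt and map->refcnt managed separately */
	if (refcount_dec_and_test(&kvalue->refcnt)) {
		/* kvalue's count hit zero; the bpf_map reference is
		 * dropped in a second, non-atomic step.
		 */
		bpf_map_put(map);
	}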

To prevent the trampoline image of a struct_ops from being released
while it is still in use, we wait for an RCU grace period. The
setsockopt(TCP_CONGESTION, "...") call changes a socket's congestion
control algorithm and can release the old struct_ops implementation;
on its own that is fine. However, because this function is also
exposed through bpf_setsockopt(), it may be invoked by BPF programs as
well. To ensure that the trampoline image belonging to a struct_ops
can be safely called while one of its methods is in use, the
trampoline safeguards the BPF program with rcu_read_lock(). This
prevents destruction of the associated image before the trampoline
returns, and in turn requires us to wait for an RCU grace period
before freeing it.
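
A conceptual sketch of the ordering this relies on (simplified
pseudo-C, not literal kernel code; the freeing side is the new
bpf_struct_ops_map_free() in the diff below):

	/* caller side: the trampoline brackets each non-sleepable
	 * BPF program invocation with an RCU read-side section
	 */
	rcu_read_lock();
	ret = bpf_prog_run(prog);	/* may call bpf_setsockopt(TCP_CONGESTION, ...) */
	rcu_read_unlock();

	/* freeing side: wait for RCU readers, and for the trampoline's
	 * trailing instructions (rcu_tasks), before freeing the image
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
	__bpf_struct_ops_map_free(map);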

Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
Link: https://lore.kernel.org/r/20230323032405.3735486-2-kuifeng@meta.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parent b63cbc49
include/linux/bpf.h (+1 −0)
@@ -1945,6 +1945,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
 void bpf_map_inc(struct bpf_map *map);
 void bpf_map_inc_with_uref(struct bpf_map *map);
+struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
kernel/bpf/bpf_struct_ops.c (+44 −33)
@@ -11,6 +11,7 @@
 #include <linux/refcount.h>
 #include <linux/mutex.h>
 #include <linux/btf_ids.h>
+#include <linux/rcupdate_wait.h>
 
 enum bpf_struct_ops_state {
 	BPF_STRUCT_OPS_STATE_INIT,
@@ -249,6 +250,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 	struct bpf_struct_ops_value *uvalue, *kvalue;
 	enum bpf_struct_ops_state state;
+	s64 refcnt;
 
 	if (unlikely(*(u32 *)key != 0))
 		return -ENOENT;
@@ -267,7 +269,14 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
 	uvalue = value;
 	memcpy(uvalue, st_map->uvalue, map->value_size);
 	uvalue->state = state;
-	refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));
+
+	/* This value offers the user space a general estimate of how
+	 * many sockets are still utilizing this struct_ops for TCP
+	 * congestion control. The number might not be exact, but it
+	 * should sufficiently meet our present goals.
+	 */
+	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
+	refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0));
 
 	return 0;
 }
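
This estimate reaches user space through BPF_MAP_LOOKUP_ELEM on the
struct_ops map. A minimal sketch of a reader using libbpf's
bpf_map_lookup_elem() wrapper (map_fd and the value buffer are assumed
to be set up elsewhere; the refcnt field sits in the value's
bpf_struct_ops_value header):

	#include <bpf/bpf.h>

	__u32 key = 0;	/* struct_ops maps hold one element, at key 0 */
	int err = bpf_map_lookup_elem(map_fd, &key, value);
	/* On success, the value's refcnt field carries
	 * max(map->refcnt - map->usercnt, 0): a rough count of
	 * in-kernel users, e.g. sockets using this bpf-tcp-cc.
	 */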
@@ -491,7 +500,6 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 		*(unsigned long *)(udata + moff) = prog->aux->id;
 	}
 
-	refcount_set(&kvalue->refcnt, 1);
 	bpf_map_inc(map);
 
 	set_memory_rox((long)st_map->image, 1);
@@ -536,7 +544,6 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
 	switch (prev_state) {
 	case BPF_STRUCT_OPS_STATE_INUSE:
 		st_map->st_ops->unreg(&st_map->kvalue.data);
-		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
-			bpf_map_put(map);
+		bpf_map_put(map);
 		return 0;
 	case BPF_STRUCT_OPS_STATE_TOBEFREE:
@@ -570,7 +577,7 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
 	kfree(value);
 }
 
-static void bpf_struct_ops_map_free(struct bpf_map *map)
+static void __bpf_struct_ops_map_free(struct bpf_map *map)
 {
 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
 
@@ -582,6 +589,28 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
 	bpf_map_area_free(st_map);
 }
 
+static void bpf_struct_ops_map_free(struct bpf_map *map)
+{
+	/* The struct_ops's function may switch to another struct_ops.
+	 *
+	 * For example, bpf_tcp_cc_x->init() may switch to
+	 * another tcp_cc_y by calling
+	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
+	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
+	 * and its refcount may reach 0, which then frees its
+	 * trampoline image while tcp_cc_x is still running.
+	 *
+	 * A vanilla rcu gp waits for all bpf-tcp-cc progs
+	 * to finish; bpf-tcp-cc progs are non-sleepable.
+	 * An rcu_tasks gp waits for the last few insns
+	 * in the trampoline image to finish before releasing
+	 * the trampoline image.
+	 */
+	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
+
+	__bpf_struct_ops_map_free(map);
+}
+
 static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
@@ -630,7 +659,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
 				   NUMA_NO_NODE);
 	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
 	if (!st_map->uvalue || !st_map->links || !st_map->image) {
-		bpf_struct_ops_map_free(map);
+		__bpf_struct_ops_map_free(map);
 		return ERR_PTR(-ENOMEM);
 	}

@@ -676,41 +705,23 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = {
 bool bpf_struct_ops_get(const void *kdata)
 {
 	struct bpf_struct_ops_value *kvalue;
+	struct bpf_struct_ops_map *st_map;
+	struct bpf_map *map;
 
 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
+	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
 
-	return refcount_inc_not_zero(&kvalue->refcnt);
-}
-
-static void bpf_struct_ops_put_rcu(struct rcu_head *head)
-{
-	struct bpf_struct_ops_map *st_map;
-
-	st_map = container_of(head, struct bpf_struct_ops_map, rcu);
-	bpf_map_put(&st_map->map);
+	map = __bpf_map_inc_not_zero(&st_map->map, false);
+	return !IS_ERR(map);
 }
 
 void bpf_struct_ops_put(const void *kdata)
 {
 	struct bpf_struct_ops_value *kvalue;
+	struct bpf_struct_ops_map *st_map;
 
 	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
-	if (refcount_dec_and_test(&kvalue->refcnt)) {
-		struct bpf_struct_ops_map *st_map;
+	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
 
-		st_map = container_of(kvalue, struct bpf_struct_ops_map,
-				      kvalue);
-		/* The struct_ops's function may switch to another struct_ops.
-		 *
-		 * For example, bpf_tcp_cc_x->init() may switch to
-		 * another tcp_cc_y by calling
-		 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
-		 * During the switch,  bpf_struct_ops_put(tcp_cc_x) is called
-		 * and its map->refcnt may reach 0 which then free its
-		 * trampoline image while tcp_cc_x is still running.
-		 *
-		 * Thus, a rcu grace period is needed here.
-		 */
-		call_rcu(&st_map->rcu, bpf_struct_ops_put_rcu);
-	}
+	bpf_map_put(&st_map->map);
 }
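
With kvalue->refcnt retired, the pair above reduces to plain map
refcount operations; roughly, the resulting usage contract (a sketch
using only the functions shown in this diff):

	if (bpf_struct_ops_get(kdata)) {	/* __bpf_map_inc_not_zero() */
		/* the struct_ops and its trampoline image stay alive here */
		bpf_struct_ops_put(kdata);	/* bpf_map_put() */
	}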
kernel/bpf/syscall.c (+4 −2)
@@ -1303,8 +1303,10 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
 	return map;
 }
 
-/* map_idr_lock should have been held */
-static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
+/* map_idr_lock should have been held or the map should have been
+ * protected by rcu read lock.
+ */
+struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
 {
 	int refold;