Commit e8b02296 authored by Martin KaFai Lau's avatar Martin KaFai Lau Committed by Alexei Starovoitov
Browse files

bpf: Avoid taking spinlock in bpf_task_storage_get if potential deadlock is detected



bpf_task_storage_get() does a lookup and optionally inserts
new data if BPF_LOCAL_STORAGE_GET_F_CREATE is present.

During lookup, it will cache the lookup result, and caching requires
acquiring a spinlock.  When a potential deadlock is detected (by the
bpf_task_storage_busy pcpu-counter added in
commit bc235cdb ("bpf: Prevent deadlock from recursive bpf_task_storage_[get|delete]")),
the current behavior returns NULL immediately to avoid deadlock.  This is
too pessimistic.  This patch will go ahead to do a lookup (which is a
lockless operation) but it will avoid caching it in order to avoid
acquiring the spinlock.

When lookup fails to find the data and BPF_LOCAL_STORAGE_GET_F_CREATE
is set, an insertion is needed and this requires acquiring a spinlock.
This patch will still return NULL when a potential deadlock is detected.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20221025184524.3526117-5-martin.lau@linux.dev


Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 6d65500c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -242,6 +242,7 @@ void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
	__bpf_selem_unlink_storage(selem, use_trace_rcu);
}

/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
+8 −7
Original line number Diff line number Diff line
@@ -230,17 +230,17 @@ static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
/* Called by bpf_task_storage_get*() helpers */
static void *__bpf_task_storage_get(struct bpf_map *map,
				    struct task_struct *task, void *value,
				    u64 flags, gfp_t gfp_flags)
				    u64 flags, gfp_t gfp_flags, bool nobusy)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, true);
	sdata = task_storage_lookup(task, map, nobusy);
	if (sdata)
		return sdata->data;

	/* only allocate new storage, when the task is refcounted */
	if (refcount_read(&task->usage) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE)) {
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, gfp_flags);
@@ -254,16 +254,17 @@ static void *__bpf_task_storage_get(struct bpf_map *map,
/* Recursion-aware variant of the task storage getter.
 *
 * The diff rendering interleaved the removed pre-patch lines with the
 * added ones; this is the reconstructed post-patch function.
 *
 * Per the commit message: instead of bailing out with NULL whenever the
 * bpf_task_storage_busy pcpu counter indicates a potential deadlock, we
 * still perform a lockless lookup — only caching the result and creating
 * new storage (both of which need the spinlock) are skipped.
 */
BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags, gfp_t, gfp_flags)
{
	bool nobusy;
	void *data;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
		return (unsigned long)NULL;

	/* nobusy == false means a potential deadlock was detected;
	 * proceed anyway with a lockless, non-caching lookup.
	 */
	nobusy = bpf_task_storage_trylock();
	data = __bpf_task_storage_get(map, task, value, flags,
				      gfp_flags, nobusy);
	if (nobusy)
		bpf_task_storage_unlock();
	return (unsigned long)data;
}