Commit fda64ae0 authored by Martin KaFai Lau, committed by Alexei Starovoitov
Browse files

bpf: bpf_task_storage_delete_recur does lookup first before the deadlock check



Similar to the earlier change in bpf_task_storage_get_recur.
This patch changes bpf_task_storage_delete_recur such that it
does the lookup first.  It only returns -EBUSY if it needs to
take the spinlock to do the deletion when potential deadlock
is detected.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20221025184524.3526117-7-martin.lau@linux.dev


Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 4279adb0
Loading
Loading
Loading
Loading
+11 −7
Original line number Diff line number Diff line
@@ -184,7 +184,8 @@ static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
 	return err;
 }
 
-static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
+static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
+			       bool nobusy)
 {
 	struct bpf_local_storage_data *sdata;
 
@@ -192,6 +193,9 @@ static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
 	if (!sdata)
 		return -ENOENT;
 
+	if (!nobusy)
+		return -EBUSY;
+
 	bpf_selem_unlink(SELEM(sdata), true);
 
 	return 0;
@@ -220,7 +224,7 @@ static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
 	}
 
 	bpf_task_storage_lock();
-	err = task_storage_delete(task, map);
+	err = task_storage_delete(task, map, true);
 	bpf_task_storage_unlock();
 out:
 	put_pid(pid);
@@ -289,20 +293,20 @@ BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
 BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_struct *,
 	   task)
 {
+	bool nobusy;
 	int ret;
 
 	WARN_ON_ONCE(!bpf_rcu_lock_held());
 	if (!task)
 		return -EINVAL;
 
-	if (!bpf_task_storage_trylock())
-		return -EBUSY;
-
+	nobusy = bpf_task_storage_trylock();
 	/* This helper must only be called from places where the lifetime of the task
 	 * is guaranteed. Either by being refcounted or by being protected
 	 * by an RCU read-side critical section.
 	 */
-	ret = task_storage_delete(task, map);
-	bpf_task_storage_unlock();
+	ret = task_storage_delete(task, map, nobusy);
+	if (nobusy)
+		bpf_task_storage_unlock();
 	return ret;
 }