Commit ad061cf4 authored by Jakub Kicinski
Browse files
Daniel Borkmann says:

====================
pull-request: bpf 2022-10-03

We've added 10 non-merge commits during the last 23 day(s) which contain
a total of 14 files changed, 130 insertions(+), 69 deletions(-).

The main changes are:

1) Fix dynptr helper API to gate behind CAP_BPF given it was not intended
   for unprivileged BPF programs, from Kumar Kartikeya Dwivedi.

2) Fix need_wakeup flag inheritance from umem buffer pool for shared xsk
   sockets, from Jalal Mostafa.

3) Fix truncated last_member_type_id in btf_struct_resolve() which had a
   wrong storage type, from Lorenz Bauer.

4) Fix xsk back-pressure mechanism on tx when amount of produced
   descriptors to CQ is lower than what was grabbed from xsk tx ring,
   from Maciej Fijalkowski.

5) Fix wrong cgroup attach flags being displayed to effective progs,
   from Pu Lehui.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xsk: Inherit need_wakeup flag for shared sockets
  bpf: Gate dynptr API behind CAP_BPF
  selftests/bpf: Adapt cgroup effective query uapi change
  bpftool: Fix wrong cgroup attach flags being assigned to effective progs
  bpf, cgroup: Reject prog_attach_flags array when effective query
  bpf: Ensure correct locking around vulnerable function find_vpid()
  bpf: btf: fix truncated last_member_type_id in btf_struct_resolve
  selftests/xsk: Add missing close() on netns fd
  xsk: Fix backpressure mechanism on Tx
  MAINTAINERS: Add include/linux/tnum.h to BPF CORE
====================

Link: https://lore.kernel.org/r/20221003201957.13149-1-daniel@iogearbox.net


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3a4d061c 60240bc2
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -3825,6 +3825,7 @@ F: kernel/bpf/dispatcher.c
F:	kernel/bpf/trampoline.c
F:	include/linux/bpf*
F:	include/linux/filter.h
F:	include/linux/tnum.h
BPF [BTF]
M:	Martin KaFai Lau <martin.lau@linux.dev>
+1 −1
Original line number Diff line number Diff line
@@ -95,7 +95,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
+5 −2
Original line number Diff line number Diff line
@@ -1233,7 +1233,7 @@ enum {

/* Query effective (directly attached + inherited from ancestor cgroups)
 * programs that will be executed for events within a cgroup.
 * attach_flags with this flag are returned only for directly attached programs.
 * attach_flags with this flag are always returned 0.
 */
#define BPF_F_QUERY_EFFECTIVE	(1U << 0)

@@ -1432,7 +1432,10 @@ union bpf_attr {
		__u32		attach_flags;
		__aligned_u64	prog_ids;
		__u32		prog_cnt;
		__aligned_u64	prog_attach_flags; /* output: per-program attach_flags */
		/* output: per-program attach_flags.
		 * not allowed to be set during effective query.
		 */
		__aligned_u64	prog_attach_flags;
	} query;

	struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
+1 −1
Original line number Diff line number Diff line
@@ -3128,7 +3128,7 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
	if (v->next_member) {
		const struct btf_type *last_member_type;
		const struct btf_member *last_member;
		u16 last_member_type_id;
		u32 last_member_type_id;

		last_member = btf_type_member(v->t) + v->next_member - 1;
		last_member_type_id = last_member->type;
+18 −10
Original line number Diff line number Diff line
@@ -1020,6 +1020,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_attach_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	bool effective_query = attr->query.query_flags & BPF_F_QUERY_EFFECTIVE;
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type from_atype, to_atype;
@@ -1029,8 +1030,12 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
	int total_cnt = 0;
	u32 flags;

	if (effective_query && prog_attach_flags)
		return -EINVAL;

	if (type == BPF_LSM_CGROUP) {
		if (attr->query.prog_cnt && prog_ids && !prog_attach_flags)
		if (!effective_query && attr->query.prog_cnt &&
		    prog_ids && !prog_attach_flags)
			return -EINVAL;

		from_atype = CGROUP_LSM_START;
@@ -1045,7 +1050,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
	}

	for (atype = from_atype; atype <= to_atype; atype++) {
		if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		if (effective_query) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			total_cnt += bpf_prog_array_length(effective);
@@ -1054,6 +1059,8 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		}
	}

	/* always output uattr->query.attach_flags as 0 during effective query */
	flags = effective_query ? 0 : flags;
	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &total_cnt, sizeof(total_cnt)))
@@ -1068,7 +1075,7 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
	}

	for (atype = from_atype; atype <= to_atype && total_cnt; atype++) {
		if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		if (effective_query) {
			effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
							      lockdep_is_held(&cgroup_mutex));
			cnt = min_t(int, bpf_prog_array_length(effective), total_cnt);
@@ -1090,16 +1097,17 @@ static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
				if (++i == cnt)
					break;
			}
		}

			if (prog_attach_flags) {
				flags = cgrp->bpf.flags[atype];

				for (i = 0; i < cnt; i++)
				if (copy_to_user(prog_attach_flags + i, &flags, sizeof(flags)))
					if (copy_to_user(prog_attach_flags + i,
							 &flags, sizeof(flags)))
						return -EFAULT;
				prog_attach_flags += cnt;
			}
		}

		prog_ids += cnt;
		total_cnt -= cnt;
Loading